Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Marco Elver | 1411 | 97.65% | 9 | 42.86% |
Linus Torvalds (pre-git) | 9 | 0.62% | 4 | 19.05% |
Rusty Russell | 9 | 0.62% | 1 | 4.76% |
Matthew Wilcox | 6 | 0.42% | 1 | 4.76% |
Jason A. Donenfeld | 5 | 0.35% | 3 | 14.29% |
Andrew Morton | 2 | 0.14% | 1 | 4.76% |
Ingo Molnar | 2 | 0.14% | 1 | 4.76% |
Greg Kroah-Hartman | 1 | 0.07% | 1 | 4.76% |
Total | 1445 | 100.00% | 21 | 100.00% |
// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN short boot-time selftests.
 *
 * Copyright (C) 2019, Google LLC.
 */

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kcsan-checks.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "encoding.h"

#define ITERS_PER_TEST 2000

/*
 * Test watchpoint encode and decode: check that encoding some access's info,
 * and then subsequent decode preserves the access's info.
 */
static bool __init test_encode_decode(void)
{
	int i;

	for (i = 0; i < ITERS_PER_TEST; ++i) {
		size_t size = get_random_u32_inclusive(1, MAX_ENCODABLE_SIZE);
		bool is_write = !!get_random_u32_below(2);
		unsigned long verif_masked_addr;
		long encoded_watchpoint;
		bool verif_is_write;
		unsigned long addr;
		size_t verif_size;

		get_random_bytes(&addr, sizeof(addr));
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;

		if (WARN_ON(!check_encodable(addr, size)))
			return false;

		encoded_watchpoint = encode_watchpoint(addr, size, is_write);

		/* Check special watchpoints */
		if (WARN_ON(decode_watchpoint(INVALID_WATCHPOINT, &verif_masked_addr,
					      &verif_size, &verif_is_write)))
			return false;
		if (WARN_ON(decode_watchpoint(CONSUMED_WATCHPOINT, &verif_masked_addr,
					      &verif_size, &verif_is_write)))
			return false;

		/* Check decoding watchpoint returns same data */
		if (WARN_ON(!decode_watchpoint(encoded_watchpoint, &verif_masked_addr,
					       &verif_size, &verif_is_write)))
			return false;
		if (WARN_ON(verif_masked_addr != (addr & WATCHPOINT_ADDR_MASK)))
			goto fail;
		if (WARN_ON(verif_size != size))
			goto fail;
		if (WARN_ON(is_write != verif_is_write))
			goto fail;

		continue;
fail:
		pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n",
		       __func__, is_write ? "write" : "read", size, addr,
		       encoded_watchpoint,
		       verif_is_write ? "write" : "read", verif_size,
		       verif_masked_addr);
		return false;
	}

	return true;
}

/* Test access matching function. */
static bool __init test_matching_access(void)
{
	if (WARN_ON(!matching_access(10, 1, 10, 1)))
		return false;
	if (WARN_ON(!matching_access(10, 2, 11, 1)))
		return false;
	if (WARN_ON(!matching_access(10, 1, 9, 2)))
		return false;
	if (WARN_ON(matching_access(10, 1, 11, 1)))
		return false;
	if (WARN_ON(matching_access(9, 1, 10, 1)))
		return false;

	/*
	 * An access of size 0 could match another access, as demonstrated here.
	 * Rather than add more comparisons to 'matching_access()', which would
	 * end up in the fast-path for *all* checks, check_access() simply
	 * returns for all accesses of size 0.
	 */
	if (WARN_ON(!matching_access(8, 8, 12, 0)))
		return false;

	return true;
}
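/*
 * The checks above pin down matching_access()'s semantics: two accesses
 * match only if their byte ranges overlap, so ranges that merely touch
 * (e.g. 1 byte @ 10 and 1 byte @ 11) do not match, while a zero-size
 * access inside another access's range still can, as the final check
 * demonstrates.
 */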
/*
 * Correct memory barrier instrumentation is critical to avoiding false
 * positives: simple test to check at boot certain barriers are always properly
 * instrumented. See kcsan_test for a more complete test.
 */
static DEFINE_SPINLOCK(test_spinlock);
static bool __init test_barrier(void)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	struct kcsan_scoped_access *reorder_access = &current->kcsan_ctx.reorder_access;
#else
	struct kcsan_scoped_access *reorder_access = NULL;
#endif
	bool ret = true;
	arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
	atomic_t dummy;
	long test_var;

	if (!reorder_access || !IS_ENABLED(CONFIG_SMP))
		return true;

#define __KCSAN_CHECK_BARRIER(access_type, barrier, name)					\
	do {											\
		reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED;			\
		reorder_access->size = 1;							\
		barrier;									\
		if (reorder_access->size != 0) {						\
			pr_err("improperly instrumented type=(" #access_type "): " name "\n");	\
			ret = false;								\
		}										\
	} while (0)
#define KCSAN_CHECK_READ_BARRIER(b)  __KCSAN_CHECK_BARRIER(0, b, #b)
#define KCSAN_CHECK_WRITE_BARRIER(b) __KCSAN_CHECK_BARRIER(KCSAN_ACCESS_WRITE, b, #b)
#define KCSAN_CHECK_RW_BARRIER(b)    __KCSAN_CHECK_BARRIER(KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND, b, #b)
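	/*
	 * How each check works: it primes the task's reorder_access to look
	 * like a pending 1-byte access of the given type that could still be
	 * reordered past subsequent code, then executes the barrier. A
	 * correctly instrumented barrier flushes the pending access, resetting
	 * its size to 0; a non-zero size afterwards therefore indicates the
	 * barrier was not instrumented for that access type.
	 */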
	kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. */

	KCSAN_CHECK_READ_BARRIER(mb());
	KCSAN_CHECK_READ_BARRIER(rmb());
	KCSAN_CHECK_READ_BARRIER(smp_mb());
	KCSAN_CHECK_READ_BARRIER(smp_rmb());
	KCSAN_CHECK_READ_BARRIER(dma_rmb());
	KCSAN_CHECK_READ_BARRIER(smp_mb__before_atomic());
	KCSAN_CHECK_READ_BARRIER(smp_mb__after_atomic());
	KCSAN_CHECK_READ_BARRIER(smp_mb__after_spinlock());
	KCSAN_CHECK_READ_BARRIER(smp_store_mb(test_var, 0));
	KCSAN_CHECK_READ_BARRIER(smp_store_release(&test_var, 0));
	KCSAN_CHECK_READ_BARRIER(xchg(&test_var, 0));
	KCSAN_CHECK_READ_BARRIER(xchg_release(&test_var, 0));
	KCSAN_CHECK_READ_BARRIER(cmpxchg(&test_var, 0, 0));
	KCSAN_CHECK_READ_BARRIER(cmpxchg_release(&test_var, 0, 0));
	KCSAN_CHECK_READ_BARRIER(atomic_set_release(&dummy, 0));
	KCSAN_CHECK_READ_BARRIER(atomic_add_return(1, &dummy));
	KCSAN_CHECK_READ_BARRIER(atomic_add_return_release(1, &dummy));
	KCSAN_CHECK_READ_BARRIER(atomic_fetch_add(1, &dummy));
	KCSAN_CHECK_READ_BARRIER(atomic_fetch_add_release(1, &dummy));
	KCSAN_CHECK_READ_BARRIER(test_and_set_bit(0, &test_var));
	KCSAN_CHECK_READ_BARRIER(test_and_clear_bit(0, &test_var));
	KCSAN_CHECK_READ_BARRIER(test_and_change_bit(0, &test_var));
	KCSAN_CHECK_READ_BARRIER(clear_bit_unlock(0, &test_var));
	KCSAN_CHECK_READ_BARRIER(__clear_bit_unlock(0, &test_var));
	arch_spin_lock(&arch_spinlock);
	KCSAN_CHECK_READ_BARRIER(arch_spin_unlock(&arch_spinlock));
	spin_lock(&test_spinlock);
	KCSAN_CHECK_READ_BARRIER(spin_unlock(&test_spinlock));

	KCSAN_CHECK_WRITE_BARRIER(mb());
	KCSAN_CHECK_WRITE_BARRIER(wmb());
	KCSAN_CHECK_WRITE_BARRIER(smp_mb());
	KCSAN_CHECK_WRITE_BARRIER(smp_wmb());
	KCSAN_CHECK_WRITE_BARRIER(dma_wmb());
	KCSAN_CHECK_WRITE_BARRIER(smp_mb__before_atomic());
	KCSAN_CHECK_WRITE_BARRIER(smp_mb__after_atomic());
	KCSAN_CHECK_WRITE_BARRIER(smp_mb__after_spinlock());
	KCSAN_CHECK_WRITE_BARRIER(smp_store_mb(test_var, 0));
	KCSAN_CHECK_WRITE_BARRIER(smp_store_release(&test_var, 0));
	KCSAN_CHECK_WRITE_BARRIER(xchg(&test_var, 0));
	KCSAN_CHECK_WRITE_BARRIER(xchg_release(&test_var, 0));
	KCSAN_CHECK_WRITE_BARRIER(cmpxchg(&test_var, 0, 0));
	KCSAN_CHECK_WRITE_BARRIER(cmpxchg_release(&test_var, 0, 0));
	KCSAN_CHECK_WRITE_BARRIER(atomic_set_release(&dummy, 0));
	KCSAN_CHECK_WRITE_BARRIER(atomic_add_return(1, &dummy));
	KCSAN_CHECK_WRITE_BARRIER(atomic_add_return_release(1, &dummy));
	KCSAN_CHECK_WRITE_BARRIER(atomic_fetch_add(1, &dummy));
	KCSAN_CHECK_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy));
	KCSAN_CHECK_WRITE_BARRIER(test_and_set_bit(0, &test_var));
	KCSAN_CHECK_WRITE_BARRIER(test_and_clear_bit(0, &test_var));
	KCSAN_CHECK_WRITE_BARRIER(test_and_change_bit(0, &test_var));
	KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock(0, &test_var));
	KCSAN_CHECK_WRITE_BARRIER(__clear_bit_unlock(0, &test_var));
	arch_spin_lock(&arch_spinlock);
	KCSAN_CHECK_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock));
	spin_lock(&test_spinlock);
	KCSAN_CHECK_WRITE_BARRIER(spin_unlock(&test_spinlock));

	KCSAN_CHECK_RW_BARRIER(mb());
	KCSAN_CHECK_RW_BARRIER(wmb());
	KCSAN_CHECK_RW_BARRIER(rmb());
	KCSAN_CHECK_RW_BARRIER(smp_mb());
	KCSAN_CHECK_RW_BARRIER(smp_wmb());
	KCSAN_CHECK_RW_BARRIER(smp_rmb());
	KCSAN_CHECK_RW_BARRIER(dma_wmb());
	KCSAN_CHECK_RW_BARRIER(dma_rmb());
	KCSAN_CHECK_RW_BARRIER(smp_mb__before_atomic());
	KCSAN_CHECK_RW_BARRIER(smp_mb__after_atomic());
	KCSAN_CHECK_RW_BARRIER(smp_mb__after_spinlock());
	KCSAN_CHECK_RW_BARRIER(smp_store_mb(test_var, 0));
	KCSAN_CHECK_RW_BARRIER(smp_store_release(&test_var, 0));
	KCSAN_CHECK_RW_BARRIER(xchg(&test_var, 0));
	KCSAN_CHECK_RW_BARRIER(xchg_release(&test_var, 0));
	KCSAN_CHECK_RW_BARRIER(cmpxchg(&test_var, 0, 0));
	KCSAN_CHECK_RW_BARRIER(cmpxchg_release(&test_var, 0, 0));
	KCSAN_CHECK_RW_BARRIER(atomic_set_release(&dummy, 0));
	KCSAN_CHECK_RW_BARRIER(atomic_add_return(1, &dummy));
	KCSAN_CHECK_RW_BARRIER(atomic_add_return_release(1, &dummy));
	KCSAN_CHECK_RW_BARRIER(atomic_fetch_add(1, &dummy));
	KCSAN_CHECK_RW_BARRIER(atomic_fetch_add_release(1, &dummy));
	KCSAN_CHECK_RW_BARRIER(test_and_set_bit(0, &test_var));
	KCSAN_CHECK_RW_BARRIER(test_and_clear_bit(0, &test_var));
	KCSAN_CHECK_RW_BARRIER(test_and_change_bit(0, &test_var));
	KCSAN_CHECK_RW_BARRIER(clear_bit_unlock(0, &test_var));
	KCSAN_CHECK_RW_BARRIER(__clear_bit_unlock(0, &test_var));
	arch_spin_lock(&arch_spinlock);
	KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock));
	spin_lock(&test_spinlock);
	KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock));
	KCSAN_CHECK_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
	KCSAN_CHECK_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
	KCSAN_CHECK_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
	kcsan_nestable_atomic_end();

	return ret;
}

static int __init kcsan_selftest(void)
{
	int passed = 0;
	int total = 0;

#define RUN_TEST(do_test)						\
	do {								\
		++total;						\
		if (do_test())						\
			++passed;					\
		else							\
			pr_err("selftest: " #do_test " failed");	\
	} while (0)

	RUN_TEST(test_encode_decode);
	RUN_TEST(test_matching_access);
	RUN_TEST(test_barrier);

	pr_info("selftest: %d/%d tests passed\n", passed, total);
	if (passed != total)
		panic("selftests failed");
	return 0;
}
postcore_initcall(kcsan_selftest);
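For readers without "encoding.h" at hand, the stand-alone sketch below shows one plausible way to pack an access's (address, size, is_write) into a single long, which is conceptually what encode_watchpoint()/decode_watchpoint() do. The names (sketch_encode, the WP_* masks) and the bit layout are invented for illustration; the kernel's real layout differs. The sketch does mirror why test_encode_decode() compares the decoded address against addr & WATCHPOINT_ADDR_MASK: bits spent on the size and write flag cannot also hold address bits, so only the masked address survives the round trip.

```c
/*
 * Illustrative only: a toy watchpoint encoding in user space. The bit
 * layout and all names here are invented for this sketch; the kernel's
 * actual encoding lives in encoding.h and differs.
 */
#include <stdbool.h>
#include <stdio.h>

#define WP_SIZE_BITS	4				/* toy: sizes 0..15 */
#define WP_SIZE_MASK	((1UL << WP_SIZE_BITS) - 1)	/* bits 0..3: size */
#define WP_WRITE_MASK	(1UL << WP_SIZE_BITS)		/* bit 4: is_write */
#define WP_ADDR_MASK	(~(WP_SIZE_MASK | WP_WRITE_MASK)) /* rest: address */

static long sketch_encode(unsigned long addr, size_t size, bool is_write)
{
	/* Low address bits are sacrificed to hold size and the write flag. */
	return (long)((addr & WP_ADDR_MASK) |
		      (is_write ? WP_WRITE_MASK : 0) |
		      (size & WP_SIZE_MASK));
}

static void sketch_decode(long wp, unsigned long *addr, size_t *size, bool *is_write)
{
	unsigned long v = (unsigned long)wp;

	*addr     = v & WP_ADDR_MASK;
	*size     = v & WP_SIZE_MASK;
	*is_write = !!(v & WP_WRITE_MASK);
}

int main(void)
{
	unsigned long addr = 0x7f1234567890UL, daddr;
	size_t dsize;
	bool dwrite;
	long wp = sketch_encode(addr, 8, true);

	sketch_decode(wp, &daddr, &dsize, &dwrite);
	/* Prints the masked address: the low 5 bits of 0x...90 are gone. */
	printf("%#lx -> %#lx, size %zu, %s\n",
	       addr, daddr, dsize, dwrite ? "write" : "read");
	return 0;
}
```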