kcsan: Add kcsan_set_access_mask() support
author Marco Elver <elver@google.com>
Tue, 11 Feb 2020 16:04:22 +0000 (17:04 +0100)
committer Ingo Molnar <mingo@kernel.org>
Sat, 21 Mar 2020 08:44:08 +0000 (09:44 +0100)
When setting up an access mask with kcsan_set_access_mask(), KCSAN will
only report races if concurrent changes to bits set in access_mask are
observed. Conveying access_mask via a separate call avoids introducing
overhead in the common-case fast-path.
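
A minimal usage sketch (illustrative only: "flags" and ONLY_BIT are
hypothetical names, and KCSAN_ACCESS_ASSERT is assumed to be available
from the earlier patches in this series):

	/* Report only races whose value change touches bits in ONLY_BIT. */
	kcsan_set_access_mask(ONLY_BIT);
	__kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
	kcsan_set_access_mask(0);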

Acked-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/kcsan-checks.h
include/linux/kcsan.h
init/init_task.c
kernel/kcsan/core.c
kernel/kcsan/kcsan.h
kernel/kcsan/report.c

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 8675411c8dbcde023de68e0dfd28d17c11d03033..4ef5233ff3f04466d7934830ef810fdbd7fee3c5 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -68,6 +68,16 @@ void kcsan_flat_atomic_end(void);
  */
 void kcsan_atomic_next(int n);
 
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask: bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
 #else /* CONFIG_KCSAN */
 
 static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
@@ -78,6 +88,7 @@ static inline void kcsan_nestable_atomic_end(void)    { }
 static inline void kcsan_flat_atomic_begin(void)       { }
 static inline void kcsan_flat_atomic_end(void)         { }
 static inline void kcsan_atomic_next(int n)            { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
 
 #endif /* CONFIG_KCSAN */
 
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index 7a614ca558f65fe380c7316e63157098c551ef1b..3b84606e1e6752eab6749cd3273faa4da7cf726d 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -35,6 +35,11 @@ struct kcsan_ctx {
         */
        int atomic_nest_count;
        bool in_flat_atomic;
+
+       /*
+        * Access mask for all accesses if non-zero.
+        */
+       unsigned long access_mask;
 };
 
 /**
diff --git a/init/init_task.c b/init/init_task.c
index 2b4fe98b0f0959a38063da2074b7bb317daaca9b..096191d177d5ccd40f6c6dd79a79802ea6df7254 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -167,6 +167,7 @@ struct task_struct init_task
                .atomic_next            = 0,
                .atomic_nest_count      = 0,
                .in_flat_atomic         = false,
+               .access_mask            = 0,
        },
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3f89801161d33bd35a47bc2348463ea7a2487b0e..589b1e7f0f253cbecfe7680cdcc8bf6a8411f90e 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -39,6 +39,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
        .atomic_next            = 0,
        .atomic_nest_count      = 0,
        .in_flat_atomic         = false,
+       .access_mask            = 0,
 };
 
 /*
@@ -298,6 +299,15 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 
        if (!kcsan_is_enabled())
                return;
+
+       /*
+        * The access_mask check relies on value-change comparison. To avoid
+        * reporting a race where e.g. the writer set up the watchpoint, but the
+        * reader has access_mask!=0, we have to ignore the found watchpoint.
+        */
+       if (get_ctx()->access_mask != 0)
+               return;
+
        /*
         * Consume the watchpoint as soon as possible, to minimize the chances
         * of !consumed. Consuming the watchpoint must always be guarded by
@@ -341,6 +351,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
                u32 _4;
                u64 _8;
        } expect_value;
+       unsigned long access_mask;
        enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
        unsigned long ua_flags = user_access_save();
        unsigned long irq_flags;
@@ -435,18 +446,27 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
         * Re-read value, and check if it is as expected; if not, we infer a
         * racy access.
         */
+       access_mask = get_ctx()->access_mask;
        switch (size) {
        case 1:
                expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
+               if (access_mask)
+                       expect_value._1 &= (u8)access_mask;
                break;
        case 2:
                expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
+               if (access_mask)
+                       expect_value._2 &= (u16)access_mask;
                break;
        case 4:
                expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
+               if (access_mask)
+                       expect_value._4 &= (u32)access_mask;
                break;
        case 8:
                expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
+               if (access_mask)
+                       expect_value._8 &= (u64)access_mask;
                break;
        default:
                break; /* ignore; we do not diff the values */
@@ -460,11 +480,20 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
        if (!remove_watchpoint(watchpoint)) {
                /*
                 * Depending on the access type, map a value_change of MAYBE to
-                * TRUE (require reporting).
+                * TRUE (always report) or FALSE (never report).
                 */
-               if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) {
-                       /* Always assume a value-change. */
-                       value_change = KCSAN_VALUE_CHANGE_TRUE;
+               if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
+                       if (access_mask != 0) {
+                               /*
+                                * For access with access_mask, we require a
+                                * value-change, as it is likely that races on
+                                * ~access_mask bits are expected.
+                                */
+                               value_change = KCSAN_VALUE_CHANGE_FALSE;
+                       } else if (size > 8 || is_assert) {
+                               /* Always assume a value-change. */
+                               value_change = KCSAN_VALUE_CHANGE_TRUE;
+                       }
                }
 
                /*
@@ -622,6 +651,12 @@ void kcsan_atomic_next(int n)
 }
 EXPORT_SYMBOL(kcsan_atomic_next);
 
+void kcsan_set_access_mask(unsigned long mask)
+{
+       get_ctx()->access_mask = mask;
+}
+EXPORT_SYMBOL(kcsan_set_access_mask);
+
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
 {
        check_access(ptr, size, type);
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 83a79b08b550e1f103bd99b13c576d032ad016e4..892de5120c1b6ed95e6a1c116a415989944a9492 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -98,6 +98,11 @@ enum kcsan_value_change {
         */
        KCSAN_VALUE_CHANGE_MAYBE,
 
+       /*
+        * Did not observe a value-change, and it is invalid to report the race.
+        */
+       KCSAN_VALUE_CHANGE_FALSE,
+
        /*
         * The value was observed to change, and the race should be reported.
         */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index d871476dc1348e1c4a70320d536df1d130e33b23..11c791b886f3cadcd7e16bc1d71f1d93c3be72f8 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -132,6 +132,9 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
 static bool
 skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
 {
+       /* Should never get here if value_change==FALSE. */
+       WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);
+
        /*
         * The first call to skip_report always has value_change==TRUE, since we
         * cannot know the value written of an instrumented access. For the 2nd
@@ -493,7 +496,15 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 
        kcsan_disable_current();
        if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) {
-               if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn)
+               /*
+                * Never report if value_change is FALSE, only if it is
+                * either TRUE or MAYBE. In case of MAYBE, further filtering may
+                * be done once we know the full stack trace in print_report().
+                */
+               bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
+                               print_report(ptr, size, access_type, value_change, cpu_id, type);
+
+               if (reported && panic_on_warn)
                        panic("panic_on_warn set ...\n");
 
                release_report(&flags, type);
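
For reference, the masked value-change comparison added to
kcsan_setup_watchpoint() above boils down to the following standalone
sketch (illustrative only, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * A race is only considered a value change if bits covered by
	 * access_mask differ between the value read when the watchpoint
	 * was set up and the value re-read after the delay.
	 */
	static bool masked_value_change(uint64_t before, uint64_t after,
					unsigned long access_mask)
	{
		uint64_t diff = before ^ after;	/* bits that changed */

		if (access_mask)
			diff &= access_mask;	/* ignore bits outside the mask */

		return diff != 0;
	}

	int main(void)
	{
		/* Bit 3 changed, but the mask covers only bit 0: no report. */
		return masked_value_change(0x08, 0x00, 0x01) ? 1 : 0;
	}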