cpu: atomically modify cpu->exit_request
author Alex Bennée <alex.bennee@linaro.org>
Fri, 30 Sep 2016 21:30:59 +0000 (22:30 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 4 Oct 2016 08:00:26 +0000 (10:00 +0200)
ThreadSanitizer picks up potential races even though we already use
barriers to ensure things happen in the correct order when processing
exit requests. For true C11-defined behaviour across threads we need to
use relaxed atomic_set()/atomic_read() semantics to reassure tsan.
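
For context, QEMU's atomic_read()/atomic_set() helpers live in
include/qemu/atomic.h; on compilers with the __atomic builtins they
reduce to relaxed loads and stores, roughly as in the sketch below
(the sketch_ names are illustrative, not the real macros):

    /* Minimal sketch of relaxed accessors in the spirit of QEMU's
     * include/qemu/atomic.h, assuming GCC/Clang __atomic builtins.
     * Relaxed ordering only guarantees data-race freedom; any required
     * ordering still comes from explicit barriers such as smp_wmb(). */
    #define sketch_atomic_read(ptr) \
        __atomic_load_n(ptr, __ATOMIC_RELAXED)
    #define sketch_atomic_set(ptr, val) \
        __atomic_store_n(ptr, val, __ATOMIC_RELAXED)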

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20160930213106.20186-9-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
cpu-exec.c
qom/cpu.c

index 8823d23df7e02c7d0287fd5df953f6626d7b120e..e114fcdf2962de903a992d927c43087fe627c29a 100644 (file)
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -192,7 +192,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
         /* We were asked to stop executing TBs (probably a pending
          * interrupt). We've now stopped, so clear the flag.
          */
-        cpu->tcg_exit_req = 0;
+        atomic_set(&cpu->tcg_exit_req, 0);
     }
     return ret;
 }
@@ -490,8 +490,8 @@ static inline void cpu_handle_interrupt(CPUState *cpu,
             *last_tb = NULL;
         }
     }
-    if (unlikely(cpu->exit_request || replay_has_interrupt())) {
-        cpu->exit_request = 0;
+    if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
+        atomic_set(&cpu->exit_request, 0);
         cpu->exception_index = EXCP_INTERRUPT;
         cpu_loop_exit(cpu);
     }
@@ -503,7 +503,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
 {
     uintptr_t ret;
 
-    if (unlikely(cpu->exit_request)) {
+    if (unlikely(atomic_read(&cpu->exit_request))) {
         return;
     }
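
Why tsan complains about the old code: under C11, a plain load that is
concurrent with a plain store is a data race, even when the algorithm
tolerates stale values. A standalone demo (hypothetical, not QEMU code)
that tsan flags when built with -fsanitize=thread:

    #include <pthread.h>

    static int flag;                /* plain int, like the old exit_request */

    static void *setter(void *arg)
    {
        (void)arg;
        flag = 1;                   /* plain write ... */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, setter, NULL);
        while (!flag) {             /* ... races with this plain read */
        }
        pthread_join(t, NULL);
        return 0;
    }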
 
index ef905da9becb5c626bfaba8e229086d379688170..e765bc0caf2c4f89c3cd8f6663ed017346e38323 100644 (file)
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -120,10 +120,10 @@ void cpu_reset_interrupt(CPUState *cpu, int mask)
 
 void cpu_exit(CPUState *cpu)
 {
-    cpu->exit_request = 1;
+    atomic_set(&cpu->exit_request, 1);
     /* Ensure cpu_exec will see the exit request after TCG has exited.  */
     smp_wmb();
-    cpu->tcg_exit_req = 1;
+    atomic_set(&cpu->tcg_exit_req, 1);
 }
 
 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
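
The smp_wmb() in cpu_exit() orders the two stores: exit_request must be
visible before tcg_exit_req, because the consuming side tests
tcg_exit_req first and then expects exit_request to be set. A minimal
sketch of that pairing (the reader here is hypothetical -- in QEMU
proper the consuming side is split between generated code and
cpu_tb_exec()):

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"
    #include "qom/cpu.h"

    /* Writer side, as in cpu_exit() above. */
    static void signal_exit(CPUState *cpu)
    {
        atomic_set(&cpu->exit_request, 1);
        smp_wmb();                          /* order the two stores */
        atomic_set(&cpu->tcg_exit_req, 1);
    }

    /* Hypothetical reader showing the pairing: smp_rmb() pairs with
     * the smp_wmb() so that once tcg_exit_req is seen as set,
     * exit_request is guaranteed to be visible too. */
    static bool saw_exit(CPUState *cpu)
    {
        if (atomic_read(&cpu->tcg_exit_req)) {
            smp_rmb();
            return atomic_read(&cpu->exit_request);
        }
        return false;
    }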