        u32 dbsr;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
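+       /* Protects the exit timing statistics from concurrent updates */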
+       struct mutex exit_timing_lock;
        struct kvmppc_exit_timing timing_exit;
        struct kvmppc_exit_timing timing_last_enter;
        u32 last_exit_type;
 
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
 
+#ifdef CONFIG_KVM_EXIT_TIMING
+       mutex_init(&vcpu->arch.exit_timing_lock);
+#endif
+
        return 0;
 }
 
 
 void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
 {
        int i;
 
-       /* pause guest execution to avoid concurrent updates */
-       mutex_lock(&vcpu->mutex);
+       /* Take the exit timing lock to avoid concurrent updates */
+       mutex_lock(&vcpu->arch.exit_timing_lock);
 
        vcpu->arch.last_exit_type = 0xDEAD;
        for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
        vcpu->arch.timing_exit.tv64 = 0;
        vcpu->arch.timing_last_enter.tv64 = 0;
 
-       mutex_unlock(&vcpu->mutex);
+       mutex_unlock(&vcpu->arch.exit_timing_lock);
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
                return;
        }
 
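+       /* Serialize with kvmppc_init_timing_stats() resetting these counters */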
+       mutex_lock(&vcpu->arch.exit_timing_lock);
+
        vcpu->arch.timing_count_type[type]++;
 
        /* sum */
                vcpu->arch.timing_min_duration[type] = duration;
        if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
                vcpu->arch.timing_max_duration[type] = duration;
+
+       mutex_unlock(&vcpu->arch.exit_timing_lock);
 }
 
 void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)