KVM: PPC: Book3S HV: Cosmetic post-merge cleanups
author: Paul Mackerras <paulus@ozlabs.org>
Thu, 9 Nov 2017 04:37:10 +0000 (15:37 +1100)
committer: Paul Mackerras <paulus@ozlabs.org>
Thu, 9 Nov 2017 04:37:10 +0000 (15:37 +1100)
This rearranges the code in kvmppc_run_vcpu() and kvmppc_vcpu_run_hv()
to be neater and clearer.  Deeply indented code in kvmppc_run_vcpu()
is moved out to a helper function, kvmhv_setup_mmu().  In
kvmppc_vcpu_run_hv(), make use of the existing variable 'kvm' in
place of 'vcpu->kvm'.

No functional change.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/kvm/book3s_hv.c

index ca0d4d938d6a373037608dba80e472e022f5c44d..18b16c3957fcd745c0f65e532f75b878196e280b 100644 (file)
@@ -3120,6 +3120,25 @@ out:
        trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
 }
 
+static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
+{
+       int r = 0;
+       struct kvm *kvm = vcpu->kvm;
+
+       mutex_lock(&kvm->lock);
+       if (!kvm->arch.mmu_ready) {
+               if (!kvm_is_radix(kvm))
+                       r = kvmppc_hv_setup_htab_rma(vcpu);
+               if (!r) {
+                       if (cpu_has_feature(CPU_FTR_ARCH_300))
+                               kvmppc_setup_partition_table(kvm);
+                       kvm->arch.mmu_ready = 1;
+               }
+       }
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int n_ceded, i, r;
@@ -3179,22 +3198,12 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                /* See if the MMU is ready to go */
                if (!vcpu->kvm->arch.mmu_ready) {
                        spin_unlock(&vc->lock);
-                       mutex_lock(&vcpu->kvm->lock);
-                       r = 0;
-                       if (!vcpu->kvm->arch.mmu_ready) {
-                               if (!kvm_is_radix(vcpu->kvm))
-                                       r = kvmppc_hv_setup_htab_rma(vcpu);
-                               if (!r) {
-                                       if (cpu_has_feature(CPU_FTR_ARCH_300))
-                                               kvmppc_setup_partition_table(vcpu->kvm);
-                                       vcpu->kvm->arch.mmu_ready = 1;
-                               }
-                       }
-                       mutex_unlock(&vcpu->kvm->lock);
+                       r = kvmhv_setup_mmu(vcpu);
                        spin_lock(&vc->lock);
                        if (r) {
                                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-                               kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+                               kvm_run->fail_entry.
+                                       hardware_entry_failure_reason = 0;
                                vcpu->arch.ret = r;
                                break;
                        }
@@ -3344,10 +3353,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        trace_kvm_hcall_exit(vcpu, r);
                        kvmppc_core_prepare_to_enter(vcpu);
                } else if (r == RESUME_PAGE_FAULT) {
-                       srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+                       srcu_idx = srcu_read_lock(&kvm->srcu);
                        r = kvmppc_book3s_hv_page_fault(run, vcpu,
                                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
-                       srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+                       srcu_read_unlock(&kvm->srcu, srcu_idx);
                } else if (r == RESUME_PASSTHROUGH) {
                        if (WARN_ON(xive_enabled()))
                                r = H_SUCCESS;
@@ -3367,7 +3376,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
        mtspr(SPRN_VRSAVE, user_vrsave);
 
        vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
-       atomic_dec(&vcpu->kvm->arch.vcpus_running);
+       atomic_dec(&kvm->arch.vcpus_running);
        return r;
 }