From: Borislav Petkov
Date: Tue, 11 Sep 2018 09:15:10 +0000 (+0200)
Subject: x86/paravirt: Clean up native_patch()
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=3637897b6c9bc2f12f38956d64724a6d0bbb56fd;p=linux.git

x86/paravirt: Clean up native_patch()

When CONFIG_PARAVIRT_SPINLOCKS=n, it generates a warning:

  arch/x86/kernel/paravirt_patch_64.c: In function ‘native_patch’:
  arch/x86/kernel/paravirt_patch_64.c:89:1: warning: label ‘patch_site’ defined but not used [-Wunused-label]
   patch_site:

... but those labels can simply be removed by directly calling the
respective functions there.

Get rid of local variables too, while at it. Also, simplify function
flow for better readability.

Signed-off-by: Borislav Petkov
Reviewed-by: Juergen Gross
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: virtualization@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180911091510.GA12094@zn.tnic
Signed-off-by: Ingo Molnar
---

diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index d460cbcabcfe0..6368c22fa1fa3 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -34,14 +34,10 @@ extern bool pv_is_native_vcpu_is_preempted(void);
 
 unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 {
-	const unsigned char *start, *end;
-	unsigned ret;
-
 #define PATCH_SITE(ops, x)					\
-	case PARAVIRT_PATCH(ops.x):				\
-		start = start_##ops##_##x;			\
-		end = end_##ops##_##x;				\
-		goto patch_site
+	case PARAVIRT_PATCH(ops.x):				\
+		return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)
+
 	switch (type) {
 #ifdef CONFIG_PARAVIRT_XXL
 	PATCH_SITE(irq, irq_disable);
@@ -54,32 +50,24 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 	PATCH_SITE(mmu, write_cr3);
 #endif
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-	case PARAVIRT_PATCH(lock.queued_spin_unlock):
-		if (pv_is_native_spin_unlock()) {
-			start = start_lock_queued_spin_unlock;
-			end = end_lock_queued_spin_unlock;
-			goto patch_site;
-		}
-		goto patch_default;
+	case PARAVIRT_PATCH(lock.queued_spin_unlock):
+		if (pv_is_native_spin_unlock())
+			return paravirt_patch_insns(ibuf, len,
+						    start_lock_queued_spin_unlock,
+						    end_lock_queued_spin_unlock);
+		break;
 
-	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
-		if (pv_is_native_vcpu_is_preempted()) {
-			start = start_lock_vcpu_is_preempted;
-			end = end_lock_vcpu_is_preempted;
-			goto patch_site;
-		}
-		goto patch_default;
+	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
+		if (pv_is_native_vcpu_is_preempted())
+			return paravirt_patch_insns(ibuf, len,
+						    start_lock_vcpu_is_preempted,
+						    end_lock_vcpu_is_preempted);
+		break;
 #endif
 
 	default:
-patch_default: __maybe_unused
-		ret = paravirt_patch_default(type, ibuf, addr, len);
-		break;
-
-patch_site:
-		ret = paravirt_patch_insns(ibuf, len, start, end);
 		break;
 	}
 #undef PATCH_SITE
-	return ret;
+	return paravirt_patch_default(type, ibuf, addr, len);
 }

diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 5ad5bcda9dc61..7ca9cb726f4d6 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -42,15 +42,11 @@ extern bool pv_is_native_vcpu_is_preempted(void);
 
 unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 {
-	const unsigned char *start, *end;
-	unsigned ret;
-
 #define PATCH_SITE(ops, x)					\
-	case PARAVIRT_PATCH(ops.x):				\
-		start = start_##ops##_##x;			\
-		end = end_##ops##_##x;				\
-		goto patch_site
-	switch(type) {
+	case PARAVIRT_PATCH(ops.x):				\
+		return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)
+
+	switch (type) {
 #ifdef CONFIG_PARAVIRT_XXL
 	PATCH_SITE(irq, restore_fl);
 	PATCH_SITE(irq, save_fl);
@@ -64,32 +60,24 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 	PATCH_SITE(mmu, write_cr3);
 #endif
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-	case PARAVIRT_PATCH(lock.queued_spin_unlock):
-		if (pv_is_native_spin_unlock()) {
-			start = start_lock_queued_spin_unlock;
-			end = end_lock_queued_spin_unlock;
-			goto patch_site;
-		}
-		goto patch_default;
+	case PARAVIRT_PATCH(lock.queued_spin_unlock):
+		if (pv_is_native_spin_unlock())
+			return paravirt_patch_insns(ibuf, len,
+						    start_lock_queued_spin_unlock,
+						    end_lock_queued_spin_unlock);
+		break;
 
-	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
-		if (pv_is_native_vcpu_is_preempted()) {
-			start = start_lock_vcpu_is_preempted;
-			end = end_lock_vcpu_is_preempted;
-			goto patch_site;
-		}
-		goto patch_default;
+	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
+		if (pv_is_native_vcpu_is_preempted())
+			return paravirt_patch_insns(ibuf, len,
+						    start_lock_vcpu_is_preempted,
+						    end_lock_vcpu_is_preempted);
+		break;
 #endif
 
 	default:
-patch_default: __maybe_unused
-		ret = paravirt_patch_default(type, ibuf, addr, len);
-		break;
-
-patch_site:
-		ret = paravirt_patch_insns(ibuf, len, start, end);
 		break;
 	}
 #undef PATCH_SITE
-	return ret;
+	return paravirt_patch_default(type, ibuf, addr, len);
 }
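For readers who want the shape of the resulting code without the paravirt machinery, below is a minimal, standalone C sketch (not kernel code) of the same control-flow cleanup: every case that has a native instruction template returns directly from the switch, the conditional cases just break when no native form is available, and the single fallback call sits after the switch, so no patch_site/patch_default labels and no start/end/ret bookkeeping are needed. The names patch_one_site(), patch_native(), patch_fallback(), PATCH_CASE, the byte templates and the native_unlock flag are hypothetical stand-ins for native_patch(), paravirt_patch_insns(), paravirt_patch_default(), PATCH_SITE, the start_xxx/end_xxx symbols and pv_is_native_spin_unlock().

/*
 * Standalone sketch of the post-cleanup control flow; the names are
 * illustrative stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <string.h>

static const unsigned char tmpl_save_fl[] = { 0x9c, 0x58 };	/* pushf; pop %rax */
static const unsigned char tmpl_irq_dis[] = { 0xfa };		/* cli */
static const unsigned char tmpl_unlock[]  = { 0xc6, 0x07, 0x00 }; /* movb $0, (%rdi) */

/* Copy a native instruction template into the patch site (cf. paravirt_patch_insns()). */
static unsigned patch_native(void *ibuf, unsigned len,
			     const unsigned char *start, const unsigned char *end)
{
	unsigned insn_len = end - start;

	if (insn_len > len)
		return 0;		/* template doesn't fit, leave the site alone */
	memcpy(ibuf, start, insn_len);
	return insn_len;
}

/* Common fallback for everything without a native template (cf. paravirt_patch_default()). */
static unsigned patch_fallback(int type, void *ibuf, unsigned len)
{
	(void)type; (void)ibuf; (void)len;
	return 0;
}

enum { PATCH_SAVE_FL, PATCH_IRQ_DISABLE, PATCH_SPIN_UNLOCK };

/* After the cleanup: each case returns directly, the fallback sits after the switch. */
static unsigned patch_one_site(int type, void *ibuf, unsigned len, int native_unlock)
{
#define PATCH_CASE(what, tmpl)						\
	case what:							\
		return patch_native(ibuf, len, tmpl, tmpl + sizeof(tmpl))

	switch (type) {
	PATCH_CASE(PATCH_SAVE_FL, tmpl_save_fl);
	PATCH_CASE(PATCH_IRQ_DISABLE, tmpl_irq_dis);

	case PATCH_SPIN_UNLOCK:
		if (native_unlock)
			return patch_native(ibuf, len, tmpl_unlock,
					    tmpl_unlock + sizeof(tmpl_unlock));
		break;			/* no native form: use the common fallback */
	default:
		break;
	}
#undef PATCH_CASE

	return patch_fallback(type, ibuf, len);
}

int main(void)
{
	unsigned char buf[8];

	printf("patched %u byte(s)\n", patch_one_site(PATCH_SAVE_FL, buf, sizeof(buf), 1));
	return 0;
}

Returning straight out of each case is what lets the now-unused patch_site label disappear, which in turn silences the -Wunused-label warning when CONFIG_PARAVIRT_SPINLOCKS=n compiles the spinlock cases out.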