x86/paravirt: Fix native_patch()
author Peter Zijlstra <[email protected]>
Thu, 8 Dec 2016 15:42:14 +0000 (16:42 +0100)
committer Ingo Molnar <[email protected]>
Sun, 11 Dec 2016 12:09:19 +0000 (13:09 +0100)
While chasing a regression, I noticed that we can potentially patch the
wrong code in native_patch().

If we do not select the native code sequence, we must use the default
patcher instead of falling through the switch case.
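
The fall-through bug is easy to reproduce in miniature. The sketch
below is not the kernel code itself: it is a minimal, self-contained C
program with hypothetical stand-ins for the pv_is_native_*() checks
and the patch targets, with return values chosen so the bug fires. It
shows how a queued_spin_unlock site whose native test fails falls into
the vcpu_is_preempted case and gets the wrong sequence:

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's pv_is_native_*()
     * checks; the return values are chosen to trigger the bug. */
    static int pv_is_native_spin_unlock(void)       { return 0; }
    static int pv_is_native_vcpu_is_preempted(void) { return 1; }

    enum patch_type { QUEUED_SPIN_UNLOCK, VCPU_IS_PREEMPTED };

    static const char *patch(enum patch_type type)
    {
        switch (type) {
        case QUEUED_SPIN_UNLOCK:
            if (pv_is_native_spin_unlock())
                return "native spin_unlock sequence";
            /* BUG: no 'goto patch_default' here, so we fall
             * through into the next case label ... */
        case VCPU_IS_PREEMPTED:
            if (pv_is_native_vcpu_is_preempted())
                /* ... and hand back the wrong sequence. */
                return "native vcpu_is_preempted sequence";
        default:
            return "paravirt_patch_default()";
        }
    }

    int main(void)
    {
        /* Prints "native vcpu_is_preempted sequence": the
         * queued_spin_unlock site is patched with the
         * vcpu_is_preempted code instead of the default. */
        printf("QUEUED_SPIN_UNLOCK -> %s\n",
               patch(QUEUED_SPIN_UNLOCK));
        return 0;
    }

With the fix below, each case that fails its native test jumps to the
patch_default label, so the site is handed to paravirt_patch_default()
rather than whatever the next case happens to select.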

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Alok Kataria <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Chris Wright <[email protected]>
Cc: Jeremy Fitzhardinge <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Pan Xinhui <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Peter Anvin <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rusty Russell <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: kernel test robot <[email protected]>
Fixes: 3cded4179481 ("x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
arch/x86/kernel/paravirt_patch_32.c
arch/x86/kernel/paravirt_patch_64.c

index ff03dbd286251b254f9cf1d0d063ddc0489e8c4a..33cdec221f3da99836a76a5864780c77c1073efe 100644 (file)
@@ -58,15 +58,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                                end   = end_pv_lock_ops_queued_spin_unlock;
                                goto patch_site;
                        }
+                       goto patch_default;
+
                case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
                        if (pv_is_native_vcpu_is_preempted()) {
                                start = start_pv_lock_ops_vcpu_is_preempted;
                                end   = end_pv_lock_ops_vcpu_is_preempted;
                                goto patch_site;
                        }
+                       goto patch_default;
 #endif
 
        default:
+patch_default:
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;
 
index e61dd9791f4fd400dd61ebfd1195e29a0088963a..b0fceff502b30fb41f752306c0d598c70cad08da 100644 (file)
@@ -70,15 +70,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                                end   = end_pv_lock_ops_queued_spin_unlock;
                                goto patch_site;
                        }
+                       goto patch_default;
+
                case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
                        if (pv_is_native_vcpu_is_preempted()) {
                                start = start_pv_lock_ops_vcpu_is_preempted;
                                end   = end_pv_lock_ops_vcpu_is_preempted;
                                goto patch_site;
                        }
+                       goto patch_default;
 #endif
 
        default:
+patch_default:
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;