locking/qspinlock: Revert to test-and-set on hypervisors
author Peter Zijlstra (Intel) <[email protected]>
Fri, 24 Apr 2015 18:56:36 +0000 (14:56 -0400)
committer Ingo Molnar <[email protected]>
Fri, 8 May 2015 10:36:58 +0000 (12:36 +0200)
When we detect a hypervisor (!paravirt, see qspinlock paravirt support
patches), revert to a simple test-and-set lock to avoid the horrors
of queue preemption.
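
As an aside (not part of the commit): the "simple test-and-set lock" the patch falls back to is, in essence, a single lock word that every waiter tries to flip from 0 to locked in a loop, with no queue and therefore no handover order that a preempted vCPU could stall. A minimal user-space analogue using C11 atomics, with made-up names (tas_lock_acquire/tas_lock_release) rather than the kernel's atomic_t API, might look like this:

#include <stdatomic.h>

struct tas_lock {
	atomic_int val;			/* 0 = unlocked, 1 = locked */
};

static void tas_lock_acquire(struct tas_lock *l)
{
	int expected;

	/*
	 * Spin until we atomically flip 0 -> 1.  Every waiter hammers the
	 * same cache line, but no queue is formed, so a waiter that gets
	 * preempted cannot block the waiters behind it.
	 */
	do {
		expected = 0;
	} while (!atomic_compare_exchange_weak_explicit(&l->val, &expected, 1,
							memory_order_acquire,
							memory_order_relaxed));
}

static void tas_lock_release(struct tas_lock *l)
{
	atomic_store_explicit(&l->val, 0, memory_order_release);
}

The actual hook added by this patch, virt_queued_spin_lock(), appears in the x86 hunk below and spins with atomic_cmpxchg()/cpu_relax() on the qspinlock word instead.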

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Waiman Long <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Daniel J Blueman <[email protected]>
Cc: David Vrabel <[email protected]>
Cc: Douglas Hatch <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Konrad Rzeszutek Wilk <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Raghavendra K T <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Scott J Norton <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
arch/x86/include/asm/qspinlock.h
include/asm-generic/qspinlock.h
kernel/locking/qspinlock.c

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index e2aee82736646480e9cbbe2e9e3cbbefae87a51e..f079b7020e3f54410c1d39d904d3dd4f023d330a 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
 
+#include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 
 #define        queued_spin_unlock queued_spin_unlock
@@ -15,6 +16,19 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
        smp_store_release((u8 *)lock, 0);
 }
 
+#define virt_queued_spin_lock virt_queued_spin_lock
+
+static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+       if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+               return false;
+
+       while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+               cpu_relax();
+
+       return true;
+}
+
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
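
A note on the x86 hook above: static_cpu_has(X86_FEATURE_HYPERVISOR) is a boot-time-patched check that is true only when CPUID reports the kernel is running under a hypervisor, so on bare metal the function returns false immediately and the normal queued slowpath runs. When the hook does engage, the atomic_cmpxchg() loop takes the whole lock word from 0 to _Q_LOCKED_VAL, so the pending byte and the MCS queue are never used on such systems.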
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 569abcd47a9aec98b6e5d5755eb5e504cc7f65eb..83bfb87f5bf18e92ea794dd3ca3afec1b1ba6f11 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,6 +111,13 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
                cpu_relax();
 }
 
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+       return false;
+}
+#endif
+
 /*
  * Initializier
  */
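
The two hunks above use the usual arch-override convention: the x86 header defines virt_queued_spin_lock() and #defines its own name as a marker, and the generic header only provides the false-returning stub when that macro is absent. A condensed sketch of the pattern, using hypothetical names (arch_hook is not a real kernel symbol):

#include <stdbool.h>

/* In the architecture header: define the hook and mark it as overridden. */
#define arch_hook arch_hook
static inline bool arch_hook(void)
{
	return true;		/* arch-specific behaviour */
}

/*
 * In the generic header, included afterwards: supply a default only if no
 * architecture override was seen.
 */
#ifndef arch_hook
static inline bool arch_hook(void)
{
	return false;
}
#endif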
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 033872113ebbb4b12213deb75eb0b04307dfa315..fd31a474145d01623e58b1f692604447fd81dab1 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -249,6 +249,9 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 
        BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+       if (virt_queued_spin_lock(lock))
+               return;
+
        /*
         * wait for in-progress pending->locked hand-overs
         *
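
Placement-wise, the check sits at the very top of queued_spin_lock_slowpath(): the inline queued_spin_lock() fast path in asm-generic/qspinlock.h still attempts a single cmpxchg from 0 to _Q_LOCKED_VAL first, so uncontended acquisitions are unchanged on hypervisors, and only contended ones degrade to the test-and-set spin instead of queueing.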