author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2014-09-19 14:29:31 +0200
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2014-09-25 10:52:05 +0200
commit    d59b93da5e572703e1a7311c13dd3472a4e56e30 (patch)
tree      4c75073780f54bc9785433256eee3d6d70eaf868 /arch/s390/lib
parent    46b05c7bd51edafb8c8da088b49bddf7f78d48f9 (diff)
download  linux-d59b93da5e572703e1a7311c13dd3472a4e56e30.tar.gz
s390/rwlock: use directed yield for write-locked rwlocks
Add an owner field to arch_rwlock_t so that the timeslice of a virtual
CPU can be passed with diagnose 0x9c to the lock owner when the rwlock
is write-locked. The undirected yield that was used when the lock is
wanted for writing but is currently held by readers is removed.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
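
For context: diagnose 0x9c is the s390 hypervisor call that donates the
issuing virtual CPU's remaining timeslice to a named target CPU, so a
directed yield hands the time straight to the lock holder instead of to
an arbitrary runnable CPU. The stand-alone sketch below shows the retry
pattern this patch adds to the rwlock wait loops; vcpu_is_scheduled()
and yield_to_cpu() are hypothetical stubs standing in for the kernel's
smp_vcpu_scheduled() and smp_yield_cpu().

	/*
	 * Stand-alone sketch (plain C11, not kernel code) of the
	 * directed-yield retry loop.  vcpu_is_scheduled() and
	 * yield_to_cpu() are hypothetical stand-ins for the kernel
	 * helpers that issue diagnose 0x9c on s390.  As in the patch,
	 * the owner field holds the bitwise complement of the owning
	 * CPU number, so 0 means "no known owner".
	 */
	#include <stdatomic.h>
	#include <sched.h>

	#define SPIN_RETRY 1000

	struct rwlock {
		atomic_uint lock;   /* bit 31 set: write-locked; else reader count */
		atomic_uint owner;  /* ~cpu of the write owner, 0 if unknown */
	};

	/* Stubs: pretend the owner is never running, yield via the OS. */
	static int vcpu_is_scheduled(unsigned int cpu) { (void)cpu; return 0; }
	static void yield_to_cpu(unsigned int cpu) { (void)cpu; sched_yield(); }

	static void read_lock_wait(struct rwlock *rw)
	{
		unsigned int owner = 0, old;
		int count = SPIN_RETRY;

		for (;;) {
			if (count-- <= 0) {
				/* Retries exhausted: if we saw a write owner
				 * and it is not currently running, donate our
				 * timeslice to it instead of yielding blindly. */
				if (owner && !vcpu_is_scheduled(~owner))
					yield_to_cpu(~owner);
				count = SPIN_RETRY;
			}
			old = atomic_load(&rw->lock);
			owner = atomic_load(&rw->owner);
			if ((int)old < 0)	/* write-locked, keep spinning */
				continue;
			if (atomic_compare_exchange_strong(&rw->lock, &old, old + 1))
				return;		/* reader count bumped */
		}
	}

	int main(void)
	{
		struct rwlock rw = { .lock = 0, .owner = 0 };
		read_lock_wait(&rw);	/* lock is free: first CAS succeeds */
		return 0;
	}

The same "if (owner && !smp_vcpu_scheduled(~owner))" fallback replaces
the old smp_yield() in all four wait loops below; only the final
compare-and-swap differs between the read and write paths.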
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/spinlock.c | 49 ++++++++++++++++++++++++++++++-------------------
1 file changed, 30 insertions(+), 19 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 5b0e445bc3f3..5f63ac5783cb 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-void arch_spin_relax(arch_spinlock_t *lp)
-{
-	unsigned int cpu = lp->lock;
-	if (cpu != 0) {
-		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
-		    !smp_vcpu_scheduled(~cpu))
-			smp_yield_cpu(~cpu);
-	}
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	int count;
@@ -122,15 +111,18 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if ((int) old < 0)
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
@@ -141,16 +133,19 @@ EXPORT_SYMBOL(_raw_read_lock_wait);
 
 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
 	local_irq_restore(flags);
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if ((int) old < 0)
 			continue;
 		local_irq_disable();
@@ -179,15 +174,18 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if (old)
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
@@ -198,16 +196,19 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
 	local_irq_restore(flags);
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if (old)
 			continue;
 		local_irq_disable();
@@ -233,3 +234,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 	return 0;
 }
 EXPORT_SYMBOL(_raw_write_trylock_retry);
+
+void arch_lock_relax(unsigned int cpu)
+{
+	if (!cpu)
+		return;
+	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+		return;
+	smp_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(arch_lock_relax);
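
The replacement helper consolidates what arch_spin_relax() did:
arch_lock_relax() takes the stored owner word directly (the complement
of the owning CPU number, with 0 meaning no known owner), yields
unconditionally under z/VM and KVM, and on LPAR skips the yield when
the target virtual CPU is already running. A plausible header-side
wiring, not shown here because this view is limited to arch/s390/lib,
would route all three relax hooks through the one helper:

	/* Hypothetical sketch of the header-side hookup (the real change
	 * lives outside arch/s390/lib and is not part of this diff). */
	void arch_lock_relax(unsigned int cpu);

	#define arch_spin_relax(lock)	arch_lock_relax((lock)->lock)
	#define arch_read_relax(rw)	arch_lock_relax((rw)->owner)
	#define arch_write_relax(rw)	arch_lock_relax((rw)->owner)

With that routing, spinlocks and rwlocks share a single directed-yield
path, which is why the spinlock-only arch_spin_relax() above can be
removed.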