Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f2f09a4c authored by Christian Borntraeger, committed by Ingo Molnar
Browse files

locking/core: Remove cpu_relax_lowlatency() users



With the s390 special case of a yielding cpu_relax() implementation gone,
we can now remove all users of cpu_relax_lowlatency() and replace them
with cpu_relax().

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1477386195-32736-5-git-send-email-borntraeger@de.ibm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 22b6430d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -723,7 +723,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
		cpu_relax();
	} while (!need_resched());

	return false;
+2 −2
Original line number Diff line number Diff line
@@ -342,7 +342,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
		endtime = busy_clock() + vq->busyloop_timeout;
		while (vhost_can_busy_poll(vq->dev, endtime) &&
		       vhost_vq_avail_empty(vq->dev, vq))
			cpu_relax_lowlatency();
			cpu_relax();
		preempt_enable();
		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				      out_num, in_num, NULL, NULL);
@@ -533,7 +533,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
		while (vhost_can_busy_poll(&net->dev, endtime) &&
		       !sk_has_rx_data(sk) &&
		       vhost_vq_avail_empty(&net->dev, vq))
			cpu_relax_lowlatency();
			cpu_relax();

		preempt_enable();

+2 −2
Original line number Diff line number Diff line
@@ -28,7 +28,7 @@ struct mcs_spinlock {
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		cpu_relax_lowlatency();					\
		cpu_relax();						\
} while (0)
#endif

@@ -108,7 +108,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax_lowlatency();
			cpu_relax();
	}

	/* Pass lock to next waiter. */
+2 −2
Original line number Diff line number Diff line
@@ -369,7 +369,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
			break;
		}

		cpu_relax_lowlatency();
		cpu_relax();
	}
	rcu_read_unlock();

@@ -492,7 +492,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
		cpu_relax();
	}

	if (!waiter)
+3 −3
Original line number Diff line number Diff line
@@ -75,7 +75,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
				break;
		}

		cpu_relax_lowlatency();
		cpu_relax();
	}

	return next;
@@ -122,7 +122,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
		if (need_resched())
			goto unqueue;

		cpu_relax_lowlatency();
		cpu_relax();
	}
	return true;

@@ -148,7 +148,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax_lowlatency();
		cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
Loading