Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3382290e authored by Will Deacon, committed by Ingo Molnar
Browse files

locking/barriers: Convert users of lockless_dereference() to READ_ONCE()



[ Note, this is a Git cherry-pick of the following commit:

    506458ef ("locking/barriers: Convert users of lockless_dereference() to READ_ONCE()")

  ... for easier x86 PTI code testing and back-porting. ]

READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it
can be used instead of lockless_dereference() without any change in
semantics.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1508840570-22169-4-git-send-email-will.deacon@arm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c2bc6608
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment)
		struct ldt_struct *ldt;
		struct ldt_struct *ldt;


		/* IRQs are off, so this synchronizes with smp_store_release */
		/* IRQs are off, so this synchronizes with smp_store_release */
		ldt = lockless_dereference(current->active_mm->context.ldt);
		ldt = READ_ONCE(current->active_mm->context.ldt);
		if (!ldt || idx >= ldt->nr_entries)
		if (!ldt || idx >= ldt->nr_entries)
			return 0;
			return 0;


+2 −2
Original line number Original line Diff line number Diff line
@@ -73,8 +73,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
#ifdef CONFIG_MODIFY_LDT_SYSCALL
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;
	struct ldt_struct *ldt;


	/* lockless_dereference synchronizes with smp_store_release */
	/* READ_ONCE synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);
	ldt = READ_ONCE(mm->context.ldt);


	/*
	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * Any change to mm->context.ldt is followed by an IPI to all
+1 −1
Original line number Original line Diff line number Diff line
@@ -103,7 +103,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
static void install_ldt(struct mm_struct *current_mm,
static void install_ldt(struct mm_struct *current_mm,
			struct ldt_struct *ldt)
			struct ldt_struct *ldt)
{
{
	/* Synchronizes with lockless_dereference in load_mm_ldt. */
	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&current_mm->context.ldt, ldt);
	smp_store_release(&current_mm->context.ldt, ldt);


	/* Activate the LDT for all CPUs using current_mm. */
	/* Activate the LDT for all CPUs using current_mm. */
+10 −10
Original line number Original line Diff line number Diff line
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,


	pgpath = path_to_pgpath(path);
	pgpath = path_to_pgpath(path);


	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
	}
	}


	/* Were we instructed to switch PG? */
	/* Were we instructed to switch PG? */
	if (lockless_dereference(m->next_pg)) {
	if (READ_ONCE(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		pg = m->next_pg;
		if (!pg) {
		if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)


	/* Don't change PG until it has no remaining paths */
	/* Don't change PG until it has no remaining paths */
check_current_pg:
check_current_pg:
	pg = lockless_dereference(m->current_pg);
	pg = READ_ONCE(m->current_pg);
	if (pg) {
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
		if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
	struct request *clone;
	struct request *clone;


	/* Do we need to select a new pgpath? */
	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
		pgpath = choose_pgpath(m, nr_bytes);
		pgpath = choose_pgpath(m, nr_bytes);


@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
	bool queue_io;
	bool queue_io;


	/* Do we need to select a new pgpath? */
	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	pgpath = READ_ONCE(m->current_pgpath);
	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
	if (!pgpath || !queue_io)
	if (!pgpath || !queue_io)
		pgpath = choose_pgpath(m, nr_bytes);
		pgpath = choose_pgpath(m, nr_bytes);
@@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
	struct pgpath *current_pgpath;
	struct pgpath *current_pgpath;
	int r;
	int r;


	current_pgpath = lockless_dereference(m->current_pgpath);
	current_pgpath = READ_ONCE(m->current_pgpath);
	if (!current_pgpath)
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);
		current_pgpath = choose_pgpath(m, 0);


@@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
	}
	}


	if (r == -ENOTCONN) {
	if (r == -ENOTCONN) {
		if (!lockless_dereference(m->current_pg)) {
		if (!READ_ONCE(m->current_pg)) {
			/* Path status changed, redo selection */
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
			(void) choose_pgpath(m, 0);
		}
		}
@@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti)
		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);


	/* Guess which priority_group will be used at next mapping time */
	/* Guess which priority_group will be used at next mapping time */
	pg = lockless_dereference(m->current_pg);
	pg = READ_ONCE(m->current_pg);
	next_pg = lockless_dereference(m->next_pg);
	next_pg = READ_ONCE(m->next_pg);
	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
		pg = next_pg;
		pg = next_pg;


	if (!pg) {
	if (!pg) {
+2 −2
Original line number Original line Diff line number Diff line
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
{
{
	/*
	/*
	 * Be careful about RCU walk racing with rename:
	 * Be careful about RCU walk racing with rename:
	 * use 'lockless_dereference' to fetch the name pointer.
	 * use 'READ_ONCE' to fetch the name pointer.
	 *
	 *
	 * NOTE! Even if a rename will mean that the length
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
	 * early because the data cannot match (there can
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 * be no NUL in the ct/tcount data)
	 */
	 */
	const unsigned char *cs = lockless_dereference(dentry->d_name.name);
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);


	return dentry_string_cmp(cs, ct, tcount);
	return dentry_string_cmp(cs, ct, tcount);
}
}
Loading