
Commit 4db0c3c2 authored by Jason Low, committed by Linus Torvalds

mm: remove rest of ACCESS_ONCE() usages



We converted some of the usages of ACCESS_ONCE to READ_ONCE in the mm/
tree, since ACCESS_ONCE doesn't work reliably on non-scalar types.

This patch removes the rest of the ACCESS_ONCE usages and uses the new
READ_ONCE API for the read accesses.  This makes things cleaner: one API
instead of separate/multiple sets of APIs.
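
For background (not part of this patch): ACCESS_ONCE is a volatile cast, and certain GCC versions (the 4.6/4.7 era) could silently drop the volatile qualifier when the cast was applied to a non-scalar (aggregate) type, losing the "read exactly once" guarantee. READ_ONCE instead copies through a union, so it works for any type. A simplified userspace sketch of the two; the real definitions live in include/linux/compiler.h and are more elaborate (size dispatch, compiler barriers):

#include <stdio.h>
#include <string.h>

/* Simplified sketches; the in-tree READ_ONCE dispatches on sizeof() to
 * volatile scalar loads and falls back to a barrier()-wrapped memcpy. */
#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

#define READ_ONCE(x)						\
({								\
	union { __typeof__(x) __val; char __c[sizeof(x)]; } __u; \
	memcpy(__u.__c, (const void *)&(x), sizeof(x));		\
	__u.__val;						\
})

/* Stand-in for a non-scalar page-table entry (e.g. a 64-bit pmd_t on PAE). */
struct pmd_sketch { unsigned int lo, hi; };

int main(void)
{
	struct pmd_sketch pmd = { 0xdead, 0xbeef };

	/* On an aggregate, affected GCC versions dropped the volatile cast,
	 * so this load could be torn or re-read by the optimizer: */
	struct pmd_sketch a = ACCESS_ONCE(pmd);

	/* The union-based form works uniformly for scalars and aggregates: */
	struct pmd_sketch b = READ_ONCE(pmd);

	printf("%x %x\n", a.lo, b.hi);
	return 0;
}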

Signed-off-by: Jason Low <jason.low2@hp.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9d8c47e4
mm/huge_memory.c  +2 −2

@@ -183,7 +183,7 @@ static struct page *get_huge_zero_page(void)
 	struct page *zero_page;
 retry:
 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
-		return ACCESS_ONCE(huge_zero_page);
+		return READ_ONCE(huge_zero_page);
 
 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
@@ -202,7 +202,7 @@ static struct page *get_huge_zero_page(void)
 	/* We take additional reference here. It will be put back by shrinker */
 	atomic_set(&huge_zero_refcount, 2);
 	preempt_enable();
-	return ACCESS_ONCE(huge_zero_page);
+	return READ_ONCE(huge_zero_page);
 }
 
 static void put_huge_zero_page(void)
mm/internal.h  +2 −2

@@ -224,13 +224,13 @@ static inline unsigned long page_order(struct page *page)
  * PageBuddy() should be checked first by the caller to minimize race window,
  * and invalid values must be handled gracefully.
  *
- * ACCESS_ONCE is used so that if the caller assigns the result into a local
+ * READ_ONCE is used so that if the caller assigns the result into a local
  * variable and e.g. tests it for valid range before using, the compiler cannot
  * decide to remove the variable and inline the page_private(page) multiple
  * times, potentially observing different values in the tests and the actual
  * use of the result.
  */
-#define page_order_unsafe(page)		ACCESS_ONCE(page_private(page))
+#define page_order_unsafe(page)		READ_ONCE(page_private(page))
 
 static inline bool is_cow_mapping(vm_flags_t flags)
 {
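
The comment in that hunk is the crux of the whole series. A hypothetical stand-alone illustration of the hazard it describes (use(), MAX_ORDER_SKETCH and the page stand-in are inventions for this example, not kernel code; for scalar types the kernel's READ_ONCE boils down to a volatile load, which is what the sketch uses):

/* Sketch of READ_ONCE for the scalar case; see the general union-based
 * form in the note after the commit message above. */
#define READ_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

#define MAX_ORDER_SKETCH 11

struct page_sketch { unsigned long private; };	/* stand-in for page_private() */

extern void use(unsigned long order);		/* stand-in consumer */

void racy_reader(struct page_sketch *page)
{
	unsigned long order = page->private;
	/* The compiler is free to discard 'order' and reload page->private
	 * at each use: the value that passed the range check below may not
	 * be the value actually consumed. */
	if (order < MAX_ORDER_SKETCH)
		use(order);
}

void safe_reader(struct page_sketch *page)
{
	unsigned long order = READ_ONCE(page->private);
	/* Exactly one load is emitted; the check and the use see the same value. */
	if (order < MAX_ORDER_SKETCH)
		use(order);
}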
mm/ksm.c  +5 −5

@@ -542,7 +542,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 	expected_mapping = (void *)stable_node +
 				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
 again:
-	kpfn = ACCESS_ONCE(stable_node->kpfn);
+	kpfn = READ_ONCE(stable_node->kpfn);
 	page = pfn_to_page(kpfn);
 
 	/*
@@ -551,7 +551,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 	 * but on Alpha we need to be more careful.
 	 */
 	smp_read_barrier_depends();
-	if (ACCESS_ONCE(page->mapping) != expected_mapping)
+	if (READ_ONCE(page->mapping) != expected_mapping)
 		goto stale;
 
 	/*
@@ -577,14 +577,14 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 		cpu_relax();
 	}
 
-	if (ACCESS_ONCE(page->mapping) != expected_mapping) {
+	if (READ_ONCE(page->mapping) != expected_mapping) {
 		put_page(page);
 		goto stale;
 	}
 
 	if (lock_it) {
 		lock_page(page);
-		if (ACCESS_ONCE(page->mapping) != expected_mapping) {
+		if (READ_ONCE(page->mapping) != expected_mapping) {
 			unlock_page(page);
 			put_page(page);
 			goto stale;
@@ -600,7 +600,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 	 * before checking whether node->kpfn has been changed.
 	 */
 	smp_rmb();
-	if (ACCESS_ONCE(stable_node->kpfn) != kpfn)
+	if (READ_ONCE(stable_node->kpfn) != kpfn)
 		goto again;
 	remove_node_from_stable_tree(stable_node);
 	return NULL;
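
The barriers in this hunk are what make the READ_ONCE snapshots meaningful: the pfn is read once, the page derived from it is validated, and a re-read of the pfn after smp_rmb() detects a racing migration. A condensed userspace analogue of that retry shape, with C11 atomics standing in for READ_ONCE plus the kernel barriers (all names here are stand-ins, not kernel API):

#include <stdatomic.h>
#include <stddef.h>

struct stable_node_sketch { _Atomic unsigned long kpfn; };

extern void *pfn_to_obj(unsigned long pfn);	/* stand-in for pfn_to_page() */
extern int looks_valid(void *obj);		/* stand-in for the mapping checks */

void *lookup(struct stable_node_sketch *node)
{
	unsigned long kpfn;
	void *obj;
again:
	/* Snapshot the pfn once; acquire ordering subsumes the kernel's
	 * smp_read_barrier_depends() dependency ordering on the derived load. */
	kpfn = atomic_load_explicit(&node->kpfn, memory_order_acquire);
	obj = pfn_to_obj(kpfn);

	if (looks_valid(obj))
		return obj;

	/* Stale path: the acquire fence plays the role of smp_rmb(), keeping
	 * the validity checks above ordered before the re-read below. */
	atomic_thread_fence(memory_order_acquire);
	if (atomic_load_explicit(&node->kpfn, memory_order_relaxed) != kpfn)
		goto again;	/* raced with a move: retry with the new pfn */
	return NULL;		/* genuinely gone */
}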
mm/memcontrol.c  +9 −9

@@ -674,7 +674,7 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 {
 	unsigned long nr_pages = page_counter_read(&memcg->memory);
-	unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit);
+	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 	unsigned long excess = 0;
 
 	if (nr_pages > soft_limit)
@@ -1042,7 +1042,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 			goto out_unlock;
 
 		do {
-			pos = ACCESS_ONCE(iter->position);
+			pos = READ_ONCE(iter->position);
 			/*
 			 * A racing update may change the position and
 			 * put the last reference, hence css_tryget(),
@@ -1359,13 +1359,13 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
 	unsigned long limit;
 
 	count = page_counter_read(&memcg->memory);
-	limit = ACCESS_ONCE(memcg->memory.limit);
+	limit = READ_ONCE(memcg->memory.limit);
 	if (count < limit)
 		margin = limit - count;
 
 	if (do_swap_account) {
 		count = page_counter_read(&memcg->memsw);
-		limit = ACCESS_ONCE(memcg->memsw.limit);
+		limit = READ_ONCE(memcg->memsw.limit);
 		if (count <= limit)
 			margin = min(margin, limit - count);
 	}
@@ -2637,7 +2637,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 		return cachep;
 
 	memcg = get_mem_cgroup_from_mm(current->mm);
-	kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
 	if (kmemcg_id < 0)
 		goto out;
 
@@ -5007,7 +5007,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
 	 * tunable will only affect upcoming migrations, not the current one.
 	 * So we need to save it, and keep it going.
 	 */
-	move_flags = ACCESS_ONCE(memcg->move_charge_at_immigrate);
+	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
 	if (move_flags) {
 		struct mm_struct *mm;
 		struct mem_cgroup *from = mem_cgroup_from_task(p);
@@ -5241,7 +5241,7 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
 static int memory_low_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long low = ACCESS_ONCE(memcg->low);
+	unsigned long low = READ_ONCE(memcg->low);
 
 	if (low == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
@@ -5271,7 +5271,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
 static int memory_high_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long high = ACCESS_ONCE(memcg->high);
+	unsigned long high = READ_ONCE(memcg->high);
 
 	if (high == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
@@ -5301,7 +5301,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 static int memory_max_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long max = ACCESS_ONCE(memcg->memory.limit);
+	unsigned long max = READ_ONCE(memcg->memory.limit);
 
 	if (max == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
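
A common thread in the memcontrol.c sites is the one the move_flags comment spells out: a userspace-visible tunable is snapshotted once, and the whole operation runs against that snapshot, so a concurrent write to the tunable only affects later operations. A minimal hypothetical illustration of that pattern (names and flag bits invented for the example):

#include <stdatomic.h>

extern void charge_step(unsigned long flags);	/* stand-ins for the work */
extern void uncharge_step(unsigned long flags);

_Atomic unsigned long move_charge_tunable;	/* writable at any time via a knob */

void migrate_charges(void)
{
	/* One snapshot governs the entire migration; with two plain reads,
	 * each use below could observe a different value of the tunable. */
	unsigned long move_flags =
		atomic_load_explicit(&move_charge_tunable, memory_order_relaxed);

	if (move_flags & 0x1)
		charge_step(move_flags);
	if (move_flags & 0x2)
		uncharge_step(move_flags);	/* same snapshot as the checks */
}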
mm/memory.c  +1 −1

@@ -2845,7 +2845,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
 	struct vm_fault vmf;
 	int off;
 
-	nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT;
+	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
 
 	start_addr = max(address & mask, vma->vm_start);