
Commit e972a070 authored by David Rientjes, committed by Linus Torvalds

mm, oom: rename zonelist locking functions



try_set_zonelist_oom() and clear_zonelist_oom() are poorly named: the names
do not convey that these functions provide the locking that serializes
parallel invocations of out_of_memory().

Both functions take zone_scan_lock to provide that synchronization.

Rename try_set_zonelist_oom() to oom_zonelist_trylock() and
clear_zonelist_oom() to oom_zonelist_unlock() so that the names reflect the
locking semantics.

At the same time, convert oom_zonelist_trylock() to return bool instead of
int, since callers only test it for success or failure; the sketch after the
commit metadata illustrates the resulting calling convention.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8d060bf4
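
As a rough illustration of the convention this patch adopts (a bool-returning
trylock paired with an unconditional unlock, both serialized by an internal
spinlock), here is a minimal userspace sketch. It is not kernel code and not
part of the patch: the names region_trylock()/region_unlock() are
hypothetical, a pthread mutex stands in for zone_scan_lock, and a plain flag
stands in for ZONE_OOM_LOCKED.

/*
 * Userspace sketch only -- mimics the oom_zonelist_trylock() /
 * oom_zonelist_unlock() calling convention: a bool-returning trylock
 * and an unconditional unlock, with an internal lock protecting an
 * "already locked" flag.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static bool region_locked;	/* stands in for ZONE_OOM_LOCKED */

/* Try to become the exclusive owner; return true on success. */
static bool region_trylock(void)
{
	bool ret = true;

	pthread_mutex_lock(&scan_lock);
	if (region_locked)
		ret = false;	/* another caller already owns the region */
	else
		region_locked = true;
	pthread_mutex_unlock(&scan_lock);
	return ret;
}

/* Release ownership so other callers may proceed. */
static void region_unlock(void)
{
	pthread_mutex_lock(&scan_lock);
	region_locked = false;
	pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
	/* Caller pattern mirrors pagefault_out_of_memory() in the diff. */
	if (region_trylock()) {
		printf("got the lock: at most one caller runs this\n");
		region_unlock();
	} else {
		printf("lock is held elsewhere: back off and retry\n");
	}
	return 0;
}

The caller pattern in main() mirrors pagefault_out_of_memory() in the diff
below: attempt the trylock, do the work only on success, then unlock.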
include/linux/oom.h  +2 −2
@@ -55,8 +55,8 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			     struct mem_cgroup *memcg, nodemask_t *nodemask,
 			     const char *message);
 
-extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
+extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
+extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
 
 extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 			       int order, const nodemask_t *nodemask);
mm/oom_kill.c  +13 −17
@@ -559,28 +559,25 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
  * if a parallel OOM killing is already taking place that includes a zone in
  * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
  */
-int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
+bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
 {
 	struct zoneref *z;
 	struct zone *zone;
-	int ret = 1;
+	bool ret = true;
 
 	spin_lock(&zone_scan_lock);
-	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
 		if (zone_is_oom_locked(zone)) {
-			ret = 0;
+			ret = false;
 			goto out;
 		}
-	}
 
-	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
-		/*
-		 * Lock each zone in the zonelist under zone_scan_lock so a
-		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
-		 * when it shouldn't.
-		 */
+	/*
+	 * Lock each zone in the zonelist under zone_scan_lock so a parallel
+	 * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
+	 */
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
 		zone_set_flag(zone, ZONE_OOM_LOCKED);
-	}
 
 out:
 	spin_unlock(&zone_scan_lock);
@@ -592,15 +589,14 @@ int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  * allocation attempts with zonelists containing them may now recall the OOM
  * killer, if necessary.
  */
-void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
+void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
 {
 	struct zoneref *z;
 	struct zone *zone;
 
 	spin_lock(&zone_scan_lock);
-	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
 		zone_clear_flag(zone, ZONE_OOM_LOCKED);
-	}
 	spin_unlock(&zone_scan_lock);
 }

@@ -695,8 +691,8 @@ void pagefault_out_of_memory(void)
 		return;
 
 	zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
-	if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
+	if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
 		out_of_memory(NULL, 0, 0, NULL, false);
-		clear_zonelist_oom(zonelist, GFP_KERNEL);
+		oom_zonelist_unlock(zonelist, GFP_KERNEL);
 	}
 }
mm/page_alloc.c  +3 −3
@@ -2246,8 +2246,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 {
 	struct page *page;
 
-	/* Acquire the OOM killer lock for the zones in zonelist */
-	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
+	/* Acquire the per-zone oom lock for each zone */
+	if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
 		schedule_timeout_uninterruptible(1);
 		return NULL;
 	}
@@ -2285,7 +2285,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	out_of_memory(zonelist, gfp_mask, order, nodemask, false);
 
 out:
-	clear_zonelist_oom(zonelist, gfp_mask);
+	oom_zonelist_unlock(zonelist, gfp_mask);
 	return page;
 }