
Commit 2cf85583 authored by Tejun Heo, committed by Jens Axboe

memcontrol: schedule throttling if we are congested



Memory allocations can induce swapping via kswapd or direct reclaim.  If
kswapd is doing the IO on our behalf and we never actually enter direct
reclaim, we may never get scheduled for throttling.  So instead, check
whether our cgroup is congested and, if so, schedule the throttling; it
is then applied before we return to user space, and only if we actually
required it.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d09d8df3
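The diff below adds the mem_cgroup_try_charge_delay() wrapper and converts the fault paths to it; the mem_cgroup_throttle_swaprate() helper it calls is only declared in the hunks shown on this page.  As a rough sketch of the idea only (not the exact body from this commit), such a helper could bail out unless the allocation is allowed to do IO, check blk_cgroup_congested(), and arm a throttle against a swap device's request queue.  blk_cgroup_congested() and blkcg_schedule_throttle() are assumed here from the parent commit d09d8df3, and the swap_avail_lock/swap_avail_heads walk relies on swap internals that are only visible from mm/swapfile.c:

/*
 * Illustrative sketch, not the exact code from this commit.  Assumes the
 * blk_cgroup_congested()/blkcg_schedule_throttle() helpers from the parent
 * commit and the swap_avail_lock/swap_avail_heads internals of mm/swapfile.c.
 */
void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
				  gfp_t gfp_mask)
{
	struct swap_info_struct *si, *next;

	/* Only allocations that are allowed to do IO can be throttled. */
	if (!memcg || !(gfp_mask & __GFP_IO))
		return;

	/* Nothing to do unless some blkcg in our hierarchy is congested. */
	if (!blk_cgroup_congested())
		return;

	/*
	 * Arm a throttle against a swap device on this node; the sleep, if
	 * any, happens later, on the way back to user space.
	 */
	spin_lock(&swap_avail_lock);
	plist_for_each_entry_safe(si, next, &swap_avail_heads[node],
				  avail_lists[node]) {
		if (si->bdev) {
			blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
			break;
		}
	}
	spin_unlock(&swap_avail_lock);
}

The throttle armed this way is expected to fire when the task returns to user space, so allocations whose cgroup was never congested pay no cost.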
include/linux/memcontrol.h: +13 −0
@@ -317,6 +317,9 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
 			  bool compound);
+int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
+			  bool compound);
 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 			      bool lrucare, bool compound);
 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
@@ -789,6 +792,16 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 	return 0;
 }
 
+static inline int mem_cgroup_try_charge_delay(struct page *page,
+					      struct mm_struct *mm,
+					      gfp_t gfp_mask,
+					      struct mem_cgroup **memcgp,
+					      bool compound)
+{
+	*memcgp = NULL;
+	return 0;
+}
+
 static inline void mem_cgroup_commit_charge(struct page *page,
 					    struct mem_cgroup *memcg,
 					    bool lrucare, bool compound)
include/linux/swap.h: +10 −1
@@ -629,7 +629,6 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 
 	return memcg->swappiness;
 }
-
 #else
 static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
 {
@@ -637,6 +636,16 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
 }
 #endif
 
+#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
+					 gfp_t gfp_mask);
+#else
+static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
+						int node, gfp_t gfp_mask)
+{
+}
+#endif
+
 #ifdef CONFIG_MEMCG_SWAP
 extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
 extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
mm/huge_memory.c: +3 −3
@@ -552,7 +552,7 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
+	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1142,7 +1142,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
 		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
 					       vmf->address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
-			     mem_cgroup_try_charge(pages[i], vma->vm_mm,
+			     mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
 				     GFP_KERNEL, &memcg, false))) {
 			if (pages[i])
 				put_page(pages[i]);
@@ -1312,7 +1312,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
+	if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
 					huge_gfp, &memcg, true))) {
 		put_page(new_page);
 		split_huge_pmd(vma, vmf->pmd, vmf->address);
mm/memcontrol.c: +13 −0
@@ -5593,6 +5593,19 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 	return ret;
 }
 
+int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
+			  bool compound)
+{
+	struct mem_cgroup *memcg;
+	int ret;
+
+	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
+	memcg = *memcgp;
+	mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
+	return ret;
+}
+
 /**
  * mem_cgroup_commit_charge - commit a page charge
  * @page: page to charge
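For reference, the mm/memory.c and mm/huge_memory.c hunks are straight substitutions: mem_cgroup_try_charge() becomes mem_cgroup_try_charge_delay(), while the commit/cancel side of the charge API is unchanged.  A hypothetical caller (not taken from this patch) would use it like this:

/* Hypothetical fault-path usage, not part of this patch. */
static int example_charge_page(struct page *page, struct vm_area_struct *vma)
{
	struct mem_cgroup *memcg;

	/* Charge the page; may also arm blkcg throttling if congested. */
	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL,
					&memcg, false))
		return -ENOMEM;		/* fault handlers report VM_FAULT_OOM */

	/* ... install the mapping, then finalize the charge ... */
	mem_cgroup_commit_charge(page, memcg, false, false);
	/* or back out on error: mem_cgroup_cancel_charge(page, memcg, false); */
	return 0;
}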
mm/memory.c: +6 −5
@@ -2503,7 +2503,7 @@ static int wp_page_copy(struct vm_fault *vmf)
 		cow_user_page(new_page, old_page, vmf->address, vma);
 	}
 
-	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
+	if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
 		goto oom_free_new;
 
 	__SetPageUptodate(new_page);
@@ -3003,7 +3003,7 @@ int do_swap_page(struct vm_fault *vmf)
 		goto out_page;
 	}
 
-	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
+	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL,
 					&memcg, false)) {
 		ret = VM_FAULT_OOM;
 		goto out_page;
@@ -3165,7 +3165,8 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	if (!page)
 		goto oom;
 
-	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
+	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg,
+					false))
 		goto oom_free_page;
 
 	/*
@@ -3661,7 +3662,7 @@ static int do_cow_fault(struct vm_fault *vmf)
 	if (!vmf->cow_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
+	if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
 				&vmf->memcg, false)) {
 		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;