Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 35c754d7 authored by Balbir Singh's avatar Balbir Singh Committed by Linus Torvalds
Browse files

memory controller BUG_ON()



Move mem_cgroup_cache_charge() above radix_tree_preload().
radix_tree_preload() disables preemption, so even though the gfp_mask passed
contains __GFP_WAIT, we cannot really do __GFP_WAIT allocations; thus we
hit a BUG_ON() in kmem_cache_alloc().

This patch moves mem_cgroup_cache_charge() above radix_tree_preload()
for cache charging.

Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 044d66c1
Loading
Loading
Loading
Loading
+6 −7
Original line number Diff line number Diff line
@@ -460,14 +460,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
int add_to_page_cache(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);

	if (error == 0) {

		error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
	int error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
	if (error)
		goto out;

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error == 0) {
		write_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (!error) {
@@ -482,7 +480,8 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,

		write_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
	}
	} else
		mem_cgroup_uncharge_page(page);
out:
	return error;
}
+7 −6
Original line number Diff line number Diff line
@@ -75,13 +75,13 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
	BUG_ON(!PageLocked(page));
	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {

	error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
	if (error)
		goto out;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
@@ -97,7 +97,8 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	} else
		mem_cgroup_uncharge_page(page);
out:
	return error;
}