Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6122829b authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: slub: call kasan_alloc_pages before freeing pages in slub"

parents 93f8a090 e0a3c42d
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -23,6 +23,7 @@
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/kasan.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
@@ -158,6 +159,7 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,

static inline void free_thread_info(struct thread_info *ti)
{
	kasan_alloc_pages(virt_to_page(ti), THREAD_SIZE_ORDER);
	free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
+1 −1
Original line number Diff line number Diff line
@@ -58,9 +58,9 @@ static void map_pages(struct list_head *list)
	struct page *page;

	list_for_each_entry(page, list, lru) {
		kasan_alloc_pages(page, 0);
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
		kasan_alloc_pages(page, 0);
	}
}

+2 −2
Original line number Diff line number Diff line
@@ -760,7 +760,6 @@ static bool free_pages_prepare(struct page *page, unsigned int order)

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);
	kasan_free_pages(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
@@ -777,6 +776,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);
	kasan_free_pages(page, order);

	return true;
}
@@ -943,9 +943,9 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
	set_page_private(page, 0);
	set_page_refcounted(page);

	kasan_alloc_pages(page, order);
	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kasan_alloc_pages(page, order);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);
+2 −0
Original line number Diff line number Diff line
@@ -1485,6 +1485,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
	page_mapcount_reset(page);
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += pages;
	kasan_alloc_pages(page, order);
	__free_pages(page, order);
	memcg_uncharge_slab(s, order);
}
@@ -3397,6 +3398,7 @@ void kfree(const void *x)
	if (unlikely(!PageSlab(page))) {
		BUG_ON(!PageCompound(page));
		kfree_hook(x);
		kasan_alloc_pages(page, compound_order(page));
		__free_kmem_pages(page, compound_order(page));
		return;
	}