
Commit d8d1bc74 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "mm: switch KASan hook calling order in page alloc/free path"

parents de3f2b7f f0612483
+9 −0
@@ -651,6 +651,15 @@ config MAX_STACK_SIZE_MB

	  A sane initial value is 80 MB.

+config BALANCE_ANON_FILE_RECLAIM
+	bool "During reclaim treat anon and file backed pages equally"
+	depends on SWAP
+	help
+	  When performing memory reclaim treat anonymous and file backed pages
+	  equally.
+	  Swapping anonymous pages out to memory can be efficient enough to justify
+	  treating anonymous and file backed pages equally.
+
config DEFERRED_STRUCT_PAGE_INIT
	bool "Defer initialisation of struct pages to kthreads"
	default n
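Note on the new option: CONFIG_BALANCE_ANON_FILE_RECLAIM is a compile-time bool, consumed via IS_ENABLED() in the get_scan_count() hunk at the end of this merge. As a hedged illustration of that pattern (prefer_file_lru() is a made-up helper, not code from this tree), a bool symbol like this folds away entirely at compile time:

	/* Illustrative sketch only, not code from this merge: the usual pattern
	 * for consuming a bool Kconfig symbol with IS_ENABLED(). The real
	 * consumer is the get_scan_count() hunk at the end of this merge;
	 * prefer_file_lru() is a made-up helper name. */
	#include <linux/kconfig.h>
	#include <linux/types.h>

	static bool prefer_file_lru(bool inactive_file_is_plentiful)
	{
		/* With CONFIG_BALANCE_ANON_FILE_RECLAIM=y the first operand is
		 * constant false, so the compiler drops the file-only shortcut
		 * and anon/file LRUs keep proportional scan pressure. */
		return !IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) &&
		       inactive_file_is_plentiful;
	}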
+5 −0
@@ -2521,6 +2521,11 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
+		struct address_space *mapping = file->f_mapping;
+
+		if (mapping && (mapping->gfp_mask & __GFP_MOVABLE))
+			mapping->gfp_mask |= __GFP_CMA;
+
		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
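For orientation only: the filemap_fault() hunk marks movable page-cache mappings as CMA-eligible, presumably so that later page-cache allocations for the file may be served from CMA regions. The sketch below restates it with the generic helpers from <linux/pagemap.h>; __GFP_CMA is a vendor flag in this tree, and this is not code from the merge:

	/* Illustrative restatement of the hunk above using mapping_gfp_mask() /
	 * mapping_set_gfp_mask() rather than touching mapping->gfp_mask directly.
	 * __GFP_CMA is assumed to be provided by this tree's gfp headers. */
	if (mapping && (mapping_gfp_mask(mapping) & __GFP_MOVABLE))
		mapping_set_gfp_mask(mapping,
				     mapping_gfp_mask(mapping) | __GFP_CMA);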
+5 −2
@@ -799,7 +799,8 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

-	kmemleak_free_part_phys(base, size);
+	if (base < memblock.current_limit)
+		kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

@@ -1248,7 +1249,9 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
-		kmemleak_alloc_phys(found, size, 0, 0);
+		if (found < memblock.current_limit)
+			kmemleak_alloc(__va(found), size, 0, 0);
+
		return found;
	}
	return 0;
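Context for the two memblock hunks: they swap the kmemleak_*_phys() wrappers for the virtual-address variants, guarded by memblock.current_limit so that __va() is only applied to memory with a lowmem mapping. For comparison, the phys wrapper in this kernel generation is roughly the following (paraphrased, not part of this diff):

	/* Rough shape of kmemleak_alloc_phys() from mm/kmemleak.c in this era of
	 * kernels; it forwards to kmemleak_alloc() only when the physical address
	 * has a valid lowmem linear mapping. Paraphrased here for context. */
	void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size,
				       int min_count, gfp_t gfp)
	{
		if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
			kmemleak_alloc(__va(phys), size, min_count, gfp);
	}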
+1 −1
@@ -1913,9 +1913,9 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
	set_page_refcounted(page);

	arch_alloc_page(page, order);
-	kasan_alloc_pages(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kernel_poison_pages(page, 1 << order, 1);
+	kasan_alloc_pages(page, order);
	set_page_owner(page, order, gfp_flags);
}
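To make the reorder easier to follow, here is the resulting hook order in post_alloc_hook() with the stock meaning of each debug hook noted as a comment. This is only an annotated restatement of the hunk above; the comments are not part of the commit:

	arch_alloc_page(page, order);             /* arch hook, a no-op on most architectures */
	kernel_map_pages(page, 1 << order, 1);    /* DEBUG_PAGEALLOC: map the page back into the linear mapping */
	kernel_poison_pages(page, 1 << order, 1); /* PAGE_POISONING: verify and clear the free-time poison pattern */
	kasan_alloc_pages(page, order);           /* KASAN: unpoison the shadow, now last in the allocation path */
	set_page_owner(page, order, gfp_flags);   /* PAGE_OWNER: record the allocating stack trace */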

+7 −2
@@ -458,6 +458,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;
+	long min_cache_size = batch_size;
+
+	if (current_is_kswapd())
+		min_cache_size = 0;

	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
		nid = 0;
@@ -538,7 +542,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
	 * scanning at high prio and therefore should try to reclaim as much as
	 * possible.
	 */
-	while (total_scan >= batch_size ||
+	while (total_scan > min_cache_size ||
	       total_scan >= freeable) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);
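The behavioural change in the loop-exit condition is easiest to see in isolation. The standalone model below (plain C, not kernel code; the helper name and numbers are invented) mimics only the exit logic from the hunk above: direct reclaim stops once roughly one batch of objects remains cached, while kswapd, with min_cache_size forced to 0, may drain total_scan completely:

	/* Standalone model of the do_shrink_slab() exit condition above; not kernel
	 * code. scan_passes() and its inputs are invented for illustration only. */
	#include <stdbool.h>
	#include <stdio.h>

	static long scan_passes(long total_scan, long freeable, long batch_size,
				bool is_kswapd)
	{
		long min_cache_size = is_kswapd ? 0 : batch_size;
		long passes = 0;

		while (total_scan > min_cache_size || total_scan >= freeable) {
			long nr_to_scan = total_scan < batch_size ? total_scan : batch_size;

			if (nr_to_scan == 0)
				break;	/* nothing left to hand to the shrinker */
			total_scan -= nr_to_scan;
			passes++;
		}
		return passes;
	}

	int main(void)
	{
		/* Direct reclaim leaves up to one batch of objects cached. */
		printf("direct reclaim: %ld passes\n", scan_passes(300, 1000, 128, false));
		/* kswapd is allowed to keep scanning below batch_size. */
		printf("kswapd:         %ld passes\n", scan_passes(300, 1000, 128, true));
		return 0;
	}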
@@ -2377,7 +2381,8 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
	 * lruvec even if it has plenty of old anonymous pages unless the
	 * system is under heavy pressure.
	 */
-	if (!inactive_list_is_low(lruvec, true, memcg, sc, false) &&
+	if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) &&
+	    !inactive_list_is_low(lruvec, true, memcg, sc, false) &&
	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
		scan_balance = SCAN_FILE;
		goto out;