Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 18ae7bde authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman
Browse files

Merge branch 'android11-5.4' into branch 'android11-5.4-lts'



This is a backmerge of the recent commits from android11-5.4 into
android11-5.4-lts.  Included in here are the following commits.

* 85547bc0 Merge tag 'android11-5.4.274_r00' into branch 'android11-5.4'
* 519c36cd UPSTREAM: selftests: timers: Fix valid-adjtimex signed left-shift undefined behavior
* f952d4f3 ANDROID: 16K: Fix show maps CFI failure
* ecba20dd ANDROID: 16K: Handle pad VMA splits and merges
* 7231bbf0 ANDROID: 16K: madvise_vma_pad_pages: Remove filemap_fault check
* 95ac7272 ANDROID: 16K: Only madvise padding from dynamic linker context
* 1375f832 ANDROID: 16K: Separate padding from ELF LOAD segment mappings
* 6ad75e7a ANDROID: 16K: Exclude ELF padding for fault around range
* 0f0e4aae ANDROID: 16K: Use MADV_DONTNEED to save VMA padding pages.
* 05f9de39 ANDROID: 16K: Introduce ELF padding representation for VMAs
* c54460e9 ANDROID: 16K: Introduce /sys/kernel/mm/pgsize_miration/enabled
* a563a5f0 ANDROID: GKI: add snd_compr_stop_error to Xiaomi_abi
* 70c18002 UPSTREAM: netfilter: nf_tables: release mutex after nft_gc_seq_end from abort path
* a0aeb467 UPSTREAM: netfilter: nf_tables: release batch on table validation from abort path
* ba915b85 UPSTREAM: netfilter: nf_tables: mark set as dead when unbinding anonymous set with timeout
* 66f4b04c FROMLIST: binder: check offset alignment in binder_get_object()

Change-Id: Ic5b110b8083880521869f10cf92481fc57c3bde9
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@google.com>
parents 7b99a729 85547bc0
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
  snd_soc_get_volsw_range
  snd_soc_info_volsw_range
  snd_soc_put_volsw_range
  snd_compr_stop_error

# required by cs35l45_dlkm.ko
  devm_snd_soc_register_component
+21 −3
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/pgsize_migration.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
@@ -419,7 +420,14 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)

static int show_map(struct seq_file *m, void *v)
{
	show_map_vma(m, v);
	struct vm_area_struct *pad_vma = get_pad_vma(v);
	struct vm_area_struct *vma = get_data_vma(v);

	if (vma_pages(vma))
		show_map_vma(m, vma);

	show_map_pad_vma(vma, pad_vma, m, show_map_vma, false);

	m_cache_vma(m, v);
	return 0;
}
@@ -877,7 +885,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
	seq_puts(m, " kB\n");
}

static int show_smap(struct seq_file *m, void *v)
static void show_smap_vma(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
@@ -906,9 +914,19 @@ static int show_smap(struct seq_file *m, void *v)
	if (arch_pkeys_enabled())
		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
	show_smap_vma_flags(m, vma);
}

	m_cache_vma(m, vma);
/*
 * Print one /proc/<pid>/smaps entry, splitting off any ELF-padding
 * portion of the VMA into its own synthetic entry.
 */
static int show_smap(struct seq_file *m, void *v)
{
	/* Separate the padding portion from the data portion of this VMA. */
	struct vm_area_struct *pad_vma = get_pad_vma(v);
	struct vm_area_struct *vma = get_data_vma(v);

	/* Only print stats if the data VMA still has pages. */
	if (vma_pages(vma))
		show_smap_vma(m, vma);

	/* Emit a separate smaps entry (smaps=true) for the padding region. */
	show_map_pad_vma(vma, pad_vma, m, show_smap_vma, true);

	m_cache_vma(m, v);
	return 0;
}

+133 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_SIZE_MIGRATION_H
#define _LINUX_PAGE_SIZE_MIGRATION_H

/*
 * Page Size Migration
 *
 * Copyright (c) 2024, Google LLC.
 * Author: Kalesh Singh <kaleshsingh@google.com>
 *
 * This file contains the APIs for mitigations to ensure
 * app compatibility during the transition from 4kB to 16kB
 * page size in Android.
 */

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>

/*
 * vm_flags representation of VMA padding pages.
 *
 * This allows the kernel to identify the portion of an ELF LOAD segment VMA
 * that is padding.
 *
 * 4 high bits of vm_flags [63,60] are used to represent ELF segment padding
 * up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
 * alignment (p_align).
 *
 * The representation is illustrated below.
 *
 *                    63        62        61        60
 *                _________ _________ _________ _________
 *               |  Bit 3  |  Bit 2  |  Bit 1  |  Bit 0  |
 *               | of  4kB | of  4kB | of  4kB | of  4kB |
 *               |  chunks |  chunks |  chunks |  chunks |
 *               |_________|_________|_________|_________|
 */

#define VM_PAD_WIDTH		4
#define VM_PAD_SHIFT		(BITS_PER_LONG - VM_PAD_WIDTH)
#define VM_TOTAL_PAD_PAGES	((1ULL << VM_PAD_WIDTH) - 1)
#define VM_PAD_MASK		(VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)
/*
 * Address of the first padding page in @vma.
 * Fix: parenthesize the macro argument so VMA_PAD_START(expr) expands
 * correctly for non-trivial expressions.
 */
#define VMA_PAD_START(vma)	((vma)->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))

#if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
/* Record @nr_pages of trailing padding in @vma's high vm_flags bits. */
extern void vma_set_pad_pages(struct vm_area_struct *vma,
			      unsigned long nr_pages);

/* Number of padding pages encoded in @vma's vm_flags (0 if none). */
extern unsigned long vma_pad_pages(struct vm_area_struct *vma);

/* Apply padding handling to the portion of [start, end) inside @vma. */
extern void madvise_vma_pad_pages(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);

/* Synthetic VMA describing only the padding portion of @vma. */
extern struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma);

/* Presumably @vma trimmed to exclude its padding portion — see .c file. */
extern struct vm_area_struct *get_data_vma(struct vm_area_struct *vma);

/*
 * Print a maps/smaps entry for the pad VMA via @func.
 * NOTE(review): @func is typed void * and is presumably cast back to a
 * function pointer by the callee — confirm CFI safety (cf. the
 * "16K: Fix show maps CFI failure" follow-up in this merge).
 */
extern void show_map_pad_vma(struct vm_area_struct *vma,
			     struct vm_area_struct *pad,
			     struct seq_file *m, void *func, bool smaps);

/* Keep pad-page accounting consistent when @vma is split at @addr. */
extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
			  unsigned long addr, int new_below);
#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
/* No-op: padding tracking only exists on 4kB-page 64-bit kernels. */
static inline void vma_set_pad_pages(struct vm_area_struct *vma,
				     unsigned long nr_pages)
{
}

/* Stub: no padding is ever recorded, so always report zero pad pages. */
static inline unsigned long vma_pad_pages(struct vm_area_struct *vma)
{
	return 0;
}

/* No-op: there are no padding pages to madvise away in this config. */
static inline void madvise_vma_pad_pages(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}

/* Stub: no pad VMA exists; callers must tolerate NULL. */
static inline struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
{
	return NULL;
}

/* Stub: without padding, the whole VMA is data — return it unchanged. */
static inline struct vm_area_struct *get_data_vma(struct vm_area_struct *vma)
{
	return vma;
}

/* No-op: there is never a pad VMA to print in this configuration. */
static inline void show_map_pad_vma(struct vm_area_struct *vma,
				    struct vm_area_struct *pad,
				    struct seq_file *m, void *func, bool smaps)
{
}

/* No-op: no pad accounting to fix up across a VMA split here. */
static inline void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
				 unsigned long addr, int new_below)
{
}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */

/* Number of pages in @vma excluding its trailing padding pages. */
static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
{
	return vma_pages(vma) - vma_pad_pages(vma);
}

/*
 * Sets the correct padding bits / flags for a VMA split.
 */
static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
						unsigned long newflags)
{
	/* No pad bits in the new flags: nothing to carry over. */
	if (!(newflags & VM_PAD_MASK))
		return newflags;

	/* Keep the original VMA's pad-page bits, not the incoming ones. */
	return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
}

/*
 * Merging of padding VMAs is uncommon, as padding is only allowed
 * from the linker context.
 *
 * To simplify the semantics, adjacent VMAs with padding are not
 * allowed to merge.
 */
static inline bool is_mergable_pad_vma(struct vm_area_struct *vma,
				       unsigned long vm_flags)
{
	/* Padding VMAs cannot be merged with other padding or real VMAs */
	return !((vma->vm_flags | vm_flags) & VM_PAD_MASK);
}
#endif /* _LINUX_PAGE_SIZE_MIGRATION_H */
+1 −1
Original line number Diff line number Diff line
@@ -42,7 +42,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
			   mm_init.o mmu_context.o percpu.o slab_common.o \
			   compaction.o vmacache.o \
			   interval_tree.o list_lru.o workingset.o \
			   debug.o gup.o $(mmu-y)
			   debug.o gup.o pgsize_migration.o $(mmu-y)

# Give 'page_alloc' its own module-parameter namespace
page-alloc-y := page_alloc.o
+3 −0
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/pgsize_migration.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
@@ -756,6 +757,8 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
/*
 * MADV_DONTNEED for a single VMA: release any ELF-padding pages
 * overlapping the range first, then zap the range's page mappings.
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	/* Handle VMA padding pages inside [start, end) before the zap. */
	madvise_vma_pad_pages(vma, start, end);

	zap_page_range(vma, start, end - start);
	return 0;
}
Loading