Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 60ed380e authored by Linus Torvalds
Browse files
Pull arm64 fixes from Catalin Marinas:

 - mm switching fix where the kernel pgd ends up in the user TTBR0 after
   returning from an EFI run-time services call

 - fix __GFP_ZERO handling for atomic pool and CMA DMA allocations (the
   generic code does get the gfp flags, so it's left with the arch code
   to memzero accordingly)

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Honor __GFP_ZERO in dma allocations
  arm64: efi: don't restore TTBR0 if active_mm points at init_mm
parents 62a202d7 7132813c
[diff content follows]
+5 −1
Original line number Original line Diff line number Diff line
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);


#include <asm/memory.h>
#include <asm/memory.h>


#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
#define cpu_switch_mm(pgd,mm)				\
do {							\
	BUG_ON(pgd == swapper_pg_dir);			\
	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
} while (0)


#define cpu_get_pgd()					\
#define cpu_get_pgd()					\
({							\
({							\
+5 −1
Original line number Original line Diff line number Diff line
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);


static void efi_set_pgd(struct mm_struct *mm)
static void efi_set_pgd(struct mm_struct *mm)
{
{
	if (mm == &init_mm)
		cpu_set_reserved_ttbr0();
	else
		cpu_switch_mm(mm->pgd, mm);
		cpu_switch_mm(mm->pgd, mm);

	flush_tlb_all();
	flush_tlb_all();
	if (icache_is_aivivt())
	if (icache_is_aivivt())
		__flush_icache_all();
		__flush_icache_all();
+9 −3
Original line number Original line Diff line number Diff line
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
}
}
early_param("coherent_pool", early_coherent_pool);
early_param("coherent_pool", early_coherent_pool);


static void *__alloc_from_pool(size_t size, struct page **ret_page)
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
{
	unsigned long val;
	unsigned long val;
	void *ptr = NULL;
	void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)


		*ret_page = phys_to_page(phys);
		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		ptr = (void *)val;
		if (flags & __GFP_ZERO)
			memset(ptr, 0, size);
	}
	}


	return ptr;
	return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
		flags |= GFP_DMA;
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;
		struct page *page;
		void *addr;


		size = PAGE_ALIGN(size);
		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
			return NULL;
			return NULL;


		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
		addr = page_address(page);
		if (flags & __GFP_ZERO)
			memset(addr, 0, size);
		return addr;
	} else {
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
	}
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,


	if (!coherent && !(flags & __GFP_WAIT)) {
	if (!coherent && !(flags & __GFP_WAIT)) {
		struct page *page = NULL;
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page);
		void *addr = __alloc_from_pool(size, &page, flags);


		if (addr)
		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));
			*dma_handle = phys_to_dma(dev, page_to_phys(page));