Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0e4c12b4 authored by Tom Lendacky, committed by Thomas Gleixner
Browse files

x86/mm, resource: Use PAGE_KERNEL protection for ioremap of memory pages



In order for memory pages to be properly mapped when SEV is active, it's
necessary to use the PAGE_KERNEL protection attribute as the base
protection.  This ensures that memory mapping of, e.g. ACPI tables,
receives the proper mapping attributes.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: kvm@vger.kernel.org
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/20171020143059.3291-11-brijesh.singh@amd.com
parent 1d2e733b
Loading
Loading
Loading
Loading
+67 −12
Original line number Diff line number Diff line
@@ -27,6 +27,11 @@

#include "physaddr.h"

/*
 * Result of a single walk over the iomem resources covering an ioremap
 * request; filled in by __ioremap_check_mem().
 */
struct ioremap_mem_flags {
	bool system_ram;	/* range overlaps IORESOURCE_SYSTEM_RAM */
	bool desc_other;	/* range has a descriptor other than IORES_DESC_NONE */
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
@@ -56,17 +61,59 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
static bool __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return false;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
				return true;
	}

	return 0;
	return false;
}

/*
 * Nonzero when the resource carries a specific descriptor (e.g.
 * IORES_DESC_ACPI_TABLES) rather than the generic IORES_DESC_NONE.
 */
static int __ioremap_check_desc_other(struct resource *res)
{
	if (res->desc == IORES_DESC_NONE)
		return 0;

	return 1;
}

/*
 * Per-resource callback for walk_mem_res(): accumulate into @arg (an
 * ioremap_mem_flags) whether any walked resource is system RAM and
 * whether any has a descriptor other than IORES_DESC_NONE.  Returns
 * nonzero once both flags are set, which stops the walk early.
 */
static int __ioremap_res_check(struct resource *res, void *arg)
{
	struct ioremap_mem_flags *flags = arg;

	/* Short-circuit: skip each check once its flag is already set. */
	flags->system_ram = flags->system_ram || __ioremap_check_ram(res);
	flags->desc_other = flags->desc_other || __ioremap_check_desc_other(res);

	return flags->system_ram && flags->desc_other;
}

/*
 * Walk the resources marked IORESOURCE_MEM and IORESOURCE_BUSY in a
 * single pass, recording in @flags whether [addr, addr + size) overlaps
 * system RAM and/or a resource whose descriptor is not IORES_DESC_NONE
 * (e.g. IORES_DESC_ACPI_TABLES).  One walk avoids multiple traversals.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_mem_flags *flags)
{
	u64 range_start = (u64)addr;
	u64 range_end = range_start + size - 1;

	memset(flags, 0, sizeof(*flags));

	walk_mem_res(range_start, range_end, flags, __ioremap_res_check);
}

/*
@@ -87,9 +134,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_mem_flags mem_flags;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
@@ -108,13 +156,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &mem_flags);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1) {
	if (mem_flags.system_ram) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
@@ -146,7 +193,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if (sev_active() && mem_flags.desc_other)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
+3 −0
Original line number Diff line number Diff line
@@ -271,6 +271,9 @@ extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *));
extern int
walk_mem_res(u64 start, u64 end, void *arg,
	     int (*func)(struct resource *, void *));
extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
		    int (*func)(struct resource *, void *));
extern int
+19 −0
Original line number Diff line number Diff line
@@ -397,6 +397,8 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	res->flags = p->flags;
	res->desc = p->desc;
	return 0;
}

@@ -467,6 +469,23 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
				     arg, func);
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 *
 * @start/@end bound the walk; @arg is passed through to @func.  Returns
 * the result of __walk_iomem_res_desc() (nonzero if a callback stopped
 * the walk).  IORES_DESC_NONE with first_level_children_only=true mirrors
 * walk_system_ram_res() — see that function above.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	struct resource res;

	res.start = start;
	res.end = end;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
				     arg, func);
}

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)

/*