Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7880f746 authored by Venkatesh Pallipadi's avatar Venkatesh Pallipadi Committed by Ingo Molnar
Browse files

gpu/drm, x86, PAT: routine to keep identity map in sync



Add a function to check and keep identity maps in sync, when changing
any memory type. One of the follow-on patches will also use this
routine.

Signed-off-by: default avatarVenkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: default avatarSuresh Siddha <suresh.b.siddha@intel.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Eric Anholt <eric@anholt.net>
Cc: Keith Packard <keithp@keithp.com>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent 4ab0d47d
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -19,4 +19,7 @@ extern int free_memtype(u64 start, u64 end);

extern void pat_disable(char *reason);

extern int kernel_map_sync_memtype(u64 base, unsigned long size,
		unsigned long flag);

#endif /* _ASM_X86_PAT_H */
+29 −17
Original line number Diff line number Diff line
@@ -624,6 +624,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
	free_memtype(addr, addr + size);
}

/*
 * Change the memory type for the physical address range in the kernel
 * identity mapping space if that range is a part of the identity map.
 *
 * Returns 0 on success (including when there is nothing to do because
 * PAT is disabled or the range lies entirely above the identity map),
 * -EINVAL if the attribute change on the identity mapping fails.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	/* Nothing to sync if PAT is off or the range starts past high_memory. */
	if (!pat_enabled || base >= __pa(high_memory))
		return 0;

	/* Clamp the length to the portion covered by the identity map. */
	id_sz = size;
	if (base + size > __pa(high_memory))
		id_sz = __pa(high_memory) - base;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) >= 0)
		return 0;

	printk(KERN_INFO
		"%s:%d ioremap_change_attr failed %s "
		"for %Lx-%Lx\n",
		current->comm, current->pid,
		cattr_name(flags),
		base, (unsigned long long)(base + size));
	return -EINVAL;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserved non RAM regions only and after successful reserve_memtype,
@@ -633,7 +660,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int id_sz, ret;
	int ret;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);

@@ -670,23 +697,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				     flags);
	}

	/* Need to keep identity mapping in sync */
	if (paddr >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < paddr + size) ?
				__pa(high_memory) - paddr :
				size;

	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		printk(KERN_ERR
			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			(unsigned long long)paddr,
			(unsigned long long)(paddr + size));
		return -EINVAL;
	}
	return 0;