
Commit a6525042 authored by Linus Torvalds
Merge branch 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86 PAT: remove CPA WARN_ON for zero pte
  x86 PAT: return compatible mapping to remap_pfn_range callers
  x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param
  x86 PAT: consolidate old memtype new memtype check into a function
  x86 PAT: remove PFNMAP type on track_pfn_vma_new() error
parents b9a0d06a 58dab916
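
The user-visible point of the series: a driver that asks remap_pfn_range() for, say, a write-combine mapping over a range PAT has already reserved write-back can now be handed the compatible memtype back instead of failing outright. A minimal sketch of such a caller, assuming a hypothetical driver (demo_mmap and demo_phys_base are illustrative names, not from this merge, and pgprot_writecombine() is assumed available on the target kernel):

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = demo_phys_base >> PAGE_SHIFT;	/* assumed device base */
	unsigned long size = vma->vm_end - vma->vm_start;

	/* request write-combine; PAT may substitute a compatible memtype */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* track_pfn_vma_new() can now rewrite the prot through its new
	 * pgprot_t * parameter before the ptes are written */
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}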
arch/x86/include/asm/pgtable.h +19 −0

@@ -341,6 +341,25 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
 
+static inline int is_new_memtype_allowed(unsigned long flags,
+						unsigned long new_flags)
+{
+	/*
+	 * Certain new memtypes are not allowed with certain
+	 * requested memtype:
+	 * - request is uncached, return cannot be write-back
+	 * - request is write-combine, return cannot be write-back
+	 */
+	if ((flags == _PAGE_CACHE_UC_MINUS &&
+	     new_flags == _PAGE_CACHE_WB) ||
+	    (flags == _PAGE_CACHE_WC &&
+	     new_flags == _PAGE_CACHE_WB)) {
+		return 0;
+	}
+
+	return 1;
+}
+
 #ifndef __ASSEMBLY__
 /* Indicate that x86 has its own track and untrack pfn vma functions */
 #define __HAVE_PFNMAP_TRACKING
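
As a reading aid, the helper's behaviour on the interesting request/return pairs (illustrative calls, not part of the patch):

	is_new_memtype_allowed(_PAGE_CACHE_UC_MINUS, _PAGE_CACHE_WB);	/* 0: refuse */
	is_new_memtype_allowed(_PAGE_CACHE_WC,       _PAGE_CACHE_WB);	/* 0: refuse */
	is_new_memtype_allowed(_PAGE_CACHE_UC_MINUS, _PAGE_CACHE_WC);	/* 1: allow  */
	is_new_memtype_allowed(_PAGE_CACHE_WB,       _PAGE_CACHE_WB);	/* 1: allow  */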
arch/x86/mm/pageattr.c +6 −4

@@ -555,10 +555,12 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 	if (!pte_val(old_pte)) {
 		if (!primary)
 			return 0;
-		WARN(1, KERN_WARNING "CPA: called for zero pte. "
-		       "vaddr = %lx cpa->vaddr = %lx\n", address,
-		       *cpa->vaddr);
-		return -EINVAL;
+
+		/*
+		 * Special error value returned, indicating that the mapping
+		 * did not exist at this address.
+		 */
+		return -EFAULT;
 	}
 
 	if (level == PG_LEVEL_4K) {
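
The new -EFAULT return is consumed by kernel_map_sync_memtype(), added to pat.c further down, which treats "no mapping at this address" as success rather than warning:

	ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
	if (ret == -EFAULT)
		ret = 0;	/* no identity mapping there: nothing to sync */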
arch/x86/mm/pat.c +61 −29

@@ -505,6 +505,35 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 }
 #endif /* CONFIG_STRICT_DEVMEM */
 
+/*
+ * Change the memory type for the physical address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+static int kernel_map_sync_memtype(u64 base, unsigned long size,
+					unsigned long flags)
+{
+	unsigned long id_sz;
+	int ret;
+
+	if (!pat_enabled || base >= __pa(high_memory))
+		return 0;
+
+	id_sz = (__pa(high_memory) < base + size) ?
+						__pa(high_memory) - base :
+						size;
+
+	ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
+	/*
+	 * -EFAULT return means that the addr was not valid and did not have
+	 * any identity mapping. That case is a success for
+	 * kernel_map_sync_memtype.
+	 */
+	if (ret == -EFAULT)
+		ret = 0;
+
+	return ret;
+}
+
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
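
The id_sz clamp syncs only the part of the range that actually has a kernel identity mapping. A worked example (numbers assumed for illustration, 32-bit lowmem split):

	/*
	 * Assume __pa(high_memory) == 0x38000000 (896 MB of lowmem):
	 *
	 *	base  = 0x37ffe000, size = 0x4000  -> straddles high_memory
	 *	id_sz = 0x38000000 - 0x37ffe000 = 0x2000
	 *
	 * Only the two identity-mapped pages get their attributes changed;
	 * pages above high_memory have no identity mapping to keep in sync.
	 */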
@@ -555,9 +584,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	if (retval < 0)
 		return 0;
 
-	if (((pfn < max_low_pfn_mapped) ||
-	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
-	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
+	if (kernel_map_sync_memtype(offset, size, flags)) {
 		free_memtype(offset, offset + size);
 		printk(KERN_INFO
 		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
@@ -601,12 +628,13 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+				int strict_prot)
 {
 	int is_ram = 0;
-	int id_sz, ret;
+	int ret;
 	unsigned long flags;
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
 	is_ram = pagerange_is_ram(paddr, paddr + size);
 
@@ -625,9 +653,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
 		return ret;
 
 	if (flags != want_flags) {
-		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
+		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+			free_memtype(paddr, paddr + size);
+			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+				" for %Lx-%Lx, got %s\n",
 				current->comm, current->pid,
 				cattr_name(want_flags),
 				(unsigned long long)paddr,
@@ -635,16 +664,16 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
 				cattr_name(flags));
 			return -EINVAL;
 		}
+		/*
+		 * We allow returning different type than the one requested in
+		 * non strict case.
+		 */
+		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+				      (~_PAGE_CACHE_MASK)) |
+				     flags);
 	}
 
-	/* Need to keep identity mapping in sync */
-	if (paddr >= __pa(high_memory))
-		return 0;
-
-	id_sz = (__pa(high_memory) < paddr + size) ?
-				__pa(high_memory) - paddr :
-				size;
-
-	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, flags)) {
 		free_memtype(paddr, paddr + size);
 		printk(KERN_ERR
 			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
@@ -689,6 +718,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
+	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
@@ -702,7 +732,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
-		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
@@ -710,7 +741,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -741,7 +773,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -758,14 +790,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
 		if (retval)
 			goto cleanup_ret;
 	}
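
Taken together, the new strict_prot flag separates the two callers above (call shapes condensed from the diff, sketch only):

	/* copy path (track_pfn_vma_copy): prot was read back from live ptes,
	 * so the reserved memtype must match exactly */
	pgprot = __pgprot(prot);
	retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);

	/* new-mapping path (track_pfn_vma_new): non-strict, so PAT may
	 * rewrite *prot to the compatible memtype it actually reserved */
	retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);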
arch/x86/pci/i386.c +1 −11

@@ -314,17 +314,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 		return retval;
 
 	if (flags != new_flags) {
-		/*
-		 * Do not fallback to certain memory types with certain
-		 * requested type:
-		 * - request is uncached, return cannot be write-back
-		 * - request is uncached, return cannot be write-combine
-		 * - request is write-combine, return cannot be write-back
-		 */
-		if ((flags == _PAGE_CACHE_UC_MINUS &&
-		     (new_flags == _PAGE_CACHE_WB)) ||
-		    (flags == _PAGE_CACHE_WC &&
-		     new_flags == _PAGE_CACHE_WB)) {
+		if (!is_new_memtype_allowed(flags, new_flags)) {
 			free_memtype(addr, addr+len);
 			return -EINVAL;
 		}
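
Worth noting: the deleted comment listed "request is uncached, return cannot be write-combine", but the deleted code never tested that pair, and is_new_memtype_allowed() keeps the code's behaviour rather than the comment's. Illustrative call, not in the patch:

	int ok = is_new_memtype_allowed(_PAGE_CACHE_UC_MINUS, _PAGE_CACHE_WC);
	/* ok == 1: a UC_MINUS request answered with WC is still accepted */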
include/asm-generic/pgtable.h +2 −2

@@ -301,7 +301,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
  * track_pfn_vma_new is called when a _new_ pfn mapping is being established
  * for physical range indicated by pfn and size.
  */
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 					unsigned long pfn, unsigned long size)
 {
 	return 0;
@@ -332,7 +332,7 @@ static inline void untrack_pfn_vma(struct vm_area_struct *vma,
 {
 }
 #else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 				unsigned long pfn, unsigned long size);
 extern int track_pfn_vma_copy(struct vm_area_struct *vma);
 extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
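
Architectures that do not define __HAVE_PFNMAP_TRACKING keep the no-op stubs, so generic code can call the hook unconditionally. A sketch of a remap_pfn_range-style call site (the real one lives in mm/memory.c and is not shown in this diff):

	pgprot_t prot = vma->vm_page_prot;
	int err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
	if (err)
		return err;
	vma->vm_page_prot = prot;	/* PAT may have adjusted the memtype */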