
Commit 9faa1e59 authored by Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Ioremap: fix wrong physical address handling in PAT code
  x86, tlb: Clean up and correct used type
  x86, iomap: Fix wrong page aligned size calculation in ioremapping code
  x86, mm: Create symbolic index into address_markers array
  x86, ioremap: Fix normal ram range check
  x86, ioremap: Fix incorrect physical address handling in PAE mode
  x86-64, mm: Initialize VDSO earlier on 64 bits
  x86, kmmio/mmiotrace: Fix double free of kmmio_fault_pages
parents d9a73c00 3709c857
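
Note: several of the merged fixes share one root cause. On x86-32 with PAE, a physical address can exceed 32 bits, so holding a pfn in an unsigned long and shifting it back into an address silently truncates everything at or above 4 GiB. The following is a minimal user-space sketch of that truncation, not kernel code; PAGE_SHIFT and the 4 GiB test address are illustrative stand-ins:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t phys_addr = 0x100000000ULL;	/* 4 GiB: reachable under PAE */

	/* Buggy pattern: pfn kept in a 32-bit type, as the old code did. */
	uint32_t pfn32 = (uint32_t)(phys_addr >> PAGE_SHIFT);
	uint32_t back32 = pfn32 << PAGE_SHIFT;	/* high bits are lost */

	/* Fixed pattern: keep the pfn in a type wide enough for the
	 * physical address space (resource_size_t in the kernel). */
	uint64_t pfn64 = phys_addr >> PAGE_SHIFT;
	uint64_t back64 = pfn64 << PAGE_SHIFT;

	printf("32-bit round trip: %#x (wrong)\n", back32);
	printf("64-bit round trip: %#llx (right)\n",
	       (unsigned long long)back64);
	return 0;
}

This is why the ioremap and PAT changes below move pfn arithmetic and range arguments to resource_size_t.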
arch/x86/mm/dump_pagetables.c  (+26 −6)
@@ -37,6 +37,28 @@ struct addr_marker {
 	const char *name;
 };
 
+/* indices for address_markers; keep sync'd w/ address_markers below */
+enum address_markers_idx {
+	USER_SPACE_NR = 0,
+#ifdef CONFIG_X86_64
+	KERNEL_SPACE_NR,
+	LOW_KERNEL_NR,
+	VMALLOC_START_NR,
+	VMEMMAP_START_NR,
+	HIGH_KERNEL_NR,
+	MODULES_VADDR_NR,
+	MODULES_END_NR,
+#else
+	KERNEL_SPACE_NR,
+	VMALLOC_START_NR,
+	VMALLOC_END_NR,
+# ifdef CONFIG_HIGHMEM
+	PKMAP_BASE_NR,
+# endif
+	FIXADDR_START_NR,
+#endif
+};
+
 /* Address space markers hints */
 static struct addr_marker address_markers[] = {
 	{ 0, "User Space" },
@@ -331,14 +353,12 @@ static int pt_dump_init(void)
 
 #ifdef CONFIG_X86_32
 	/* Not a compile-time constant on x86-32 */
-	address_markers[2].start_address = VMALLOC_START;
-	address_markers[3].start_address = VMALLOC_END;
+	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
+	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
 # ifdef CONFIG_HIGHMEM
-	address_markers[4].start_address = PKMAP_BASE;
-	address_markers[5].start_address = FIXADDR_START;
-# else
-	address_markers[4].start_address = FIXADDR_START;
+	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
 # endif
+	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
 #endif
 
 	pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL,
arch/x86/mm/ioremap.c  (+6 −8)
@@ -62,8 +62,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		unsigned long size, unsigned long prot_val, void *caller)
 {
-	unsigned long pfn, offset, vaddr;
-	resource_size_t last_addr;
+	unsigned long offset, vaddr;
+	resource_size_t pfn, last_pfn, last_addr;
 	const resource_size_t unaligned_phys_addr = phys_addr;
 	const unsigned long unaligned_size = size;
 	struct vm_struct *area;
@@ -100,10 +100,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
-	for (pfn = phys_addr >> PAGE_SHIFT;
-				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
-				pfn++) {
-
+	last_pfn = last_addr >> PAGE_SHIFT;
+	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
 		int is_ram = page_is_ram(pfn);
 
 		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
@@ -115,7 +113,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	 * Mappings have to be page-aligned
 	 */
 	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
+	phys_addr &= PHYSICAL_PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
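
The PHYSICAL_PAGE_MASK change in the hunk above is subtle: PAGE_MASK is an unsigned long, so on 32-bit PAE the AND zero-extends a 32-bit mask into the 64-bit physical address and silently clears bits 32 and up. A hedged user-space sketch follows; PHYS_PAGE_MASK below emulates the kernel's PHYSICAL_PAGE_MASK for a 36-bit physical space and is not the real macro:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
/* On x86-32, PAGE_MASK is an unsigned long, i.e. only 32 bits wide. */
#define PAGE_MASK_32	((uint32_t)~((1UL << PAGE_SHIFT) - 1))
/* Stand-in for PHYSICAL_PAGE_MASK: the page mask sign-extended to a
 * 36-bit PAE physical width (illustrative, not the kernel macro). */
#define PHYS_PAGE_MASK	(((uint64_t)(int64_t)(int32_t)PAGE_MASK_32) & 0xFFFFFFFFFULL)

int main(void)
{
	uint64_t phys_addr = 0x123456789ULL;	/* above 4 GiB, valid under PAE */

	/* Old code: the 32-bit mask zero-extends, wiping bits 32-35. */
	printf("phys_addr & PAGE_MASK      = %#llx (wrong)\n",
	       (unsigned long long)(phys_addr & PAGE_MASK_32));

	/* New code: a physical-width mask only clears the page offset. */
	printf("phys_addr & PHYS_PAGE_MASK = %#llx (right)\n",
	       (unsigned long long)(phys_addr & PHYS_PAGE_MASK));
	return 0;
}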
@@ -613,7 +611,7 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
 		return;
 	}
 	offset = virt_addr & ~PAGE_MASK;
-	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
 
 	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
 	while (nrpages > 0) {
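
The early_iounmap() change above fixes an off-by-one in the page count: subtracting 1 before aligning under-counts whenever offset + size lands exactly one byte past a page boundary. A quick worked check, assuming 4 KiB pages:

/* Worked example in plain C; PAGE_ALIGN mirrors the kernel's rounding. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_SHIFT	12
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long offset = 0, size = PAGE_SIZE + 1;	/* spans 2 pages */

	/* Old: PAGE_ALIGN(4096) = 4096 -> 1 page, one page too few. */
	printf("old nrpages = %lu\n",
	       PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT);

	/* New: PAGE_ALIGN(4097) = 8192 -> 2 pages, correct. */
	printf("new nrpages = %lu\n",
	       PAGE_ALIGN(offset + size) >> PAGE_SHIFT);
	return 0;
}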
arch/x86/mm/kmmio.c  (+13 −3)
@@ -45,6 +45,8 @@ struct kmmio_fault_page {
 	 * Protected by kmmio_lock, when linked into kmmio_page_table.
 	 */
 	int count;
+
+	bool scheduled_for_release;
 };
 
 struct kmmio_delayed_release {
@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(unsigned long page,
 	BUG_ON(f->count < 0);
 	if (!f->count) {
 		disarm_kmmio_fault_page(f);
-		f->release_next = *release_list;
-		*release_list = f;
+		if (!f->scheduled_for_release) {
+			f->release_next = *release_list;
+			*release_list = f;
+			f->scheduled_for_release = true;
+		}
 	}
 }
 
@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
 			prevp = &f->release_next;
 		} else {
 			*prevp = f->release_next;
+			f->release_next = NULL;
+			f->scheduled_for_release = false;
 		}
-		f = f->release_next;
+		f = *prevp;
 	}
 	spin_unlock_irqrestore(&kmmio_lock, flags);
 
@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 	kmmio_count--;
 	spin_unlock_irqrestore(&kmmio_lock, flags);
 
+	if (!release_list)
+		return;
+
 	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
 	if (!drelease) {
 		pr_crit("leaking kmmio_fault_page objects.\n");
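
The three kmmio.c hunks above close one bug: a kmmio_fault_page whose count drops to zero can be queued for RCU-deferred release a second time before the first release has run, linking it into the intrusive release list twice, corrupting the list, and freeing the page twice. A minimal user-space sketch of the guard pattern follows; the names are illustrative, not the kernel code:

#include <stdbool.h>
#include <stdio.h>

struct fault_page {
	struct fault_page *release_next;
	bool scheduled_for_release;
};

static void schedule_release(struct fault_page *f, struct fault_page **list)
{
	if (f->scheduled_for_release)	/* the fix: refuse a second add */
		return;
	f->release_next = *list;
	*list = f;
	f->scheduled_for_release = true;
}

int main(void)
{
	struct fault_page page = { 0 };
	struct fault_page *release_list = NULL;

	/* Without the guard, the second call would set
	 * page.release_next = &page: a self-cycle, hence an endless walk
	 * and a double free in the deferred-release path. */
	schedule_release(&page, &release_list);
	schedule_release(&page, &release_list);

	int n = 0;
	for (struct fault_page *f = release_list; f; f = f->release_next)
		n++;
	printf("release list length: %d (1 with the guard)\n", n);
	return 0;
}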
arch/x86/mm/pat.c  (+1 −1)
@@ -158,7 +158,7 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 	return req_type;
 }
 
-static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 {
 	int ram_page = 0, not_rampage = 0;
 	unsigned long page_nr;
arch/x86/mm/testmmiotrace.c  (+22 −0)
@@ -90,6 +90,27 @@ static void do_test(unsigned long size)
 	iounmap(p);
 }
 
+/*
+ * Tests how mmiotrace behaves in face of multiple ioremap / iounmaps in
+ * a short time. We had a bug in deferred freeing procedure which tried
+ * to free this region multiple times (ioremap can reuse the same address
+ * for many mappings).
+ */
+static void do_test_bulk_ioremapping(void)
+{
+	void __iomem *p;
+	int i;
+
+	for (i = 0; i < 10; ++i) {
+		p = ioremap_nocache(mmio_address, PAGE_SIZE);
+		if (p)
+			iounmap(p);
+	}
+
+	/* Force freeing. If it will crash we will know why. */
+	synchronize_rcu();
+}
+
 static int __init init(void)
 {
 	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
@@ -104,6 +125,7 @@ static int __init init(void)
 		   "and writing 16 kB of rubbish in there.\n",
 		   size >> 10, mmio_address);
 	do_test(size);
+	do_test_bulk_ioremapping();
 	pr_info("All done.\n");
 	return 0;
 }
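
For context, the new do_test_bulk_ioremapping() exercises exactly the path the kmmio fix above addresses: with mmiotrace armed and the module loaded against a real MMIO window (via its mmio_address parameter), it remaps the same window ten times in quick succession, since ioremap may hand back the same virtual address each time, and then uses synchronize_rcu() to force the deferred-release work to run. Before the scheduled_for_release guard, that sequence could free the same kmmio_fault_page twice.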