Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 04774069 authored by Linus Torvalds
Browse files

Merge branch 'core-fixes-for-linus' of...

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  xen: make sure stray alias mappings are gone before pinning
  vmap: cope with vm_unmap_aliases before vmalloc_init()
parents 54e7ff9d d05fdf31
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		vm_unmap_aliases();
		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
			vm_unmap_aliases();
		}
	}
}

+6 −3
Original line number Diff line number Diff line
@@ -850,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	vm_unmap_aliases();

	xen_mc_batch();

	 if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for kmap_flush_unused */
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();
		vm_unmap_aliases();

		xen_mc_batch();
	}

+7 −0
Original line number Diff line number Diff line
@@ -592,6 +592,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
@@ -828,6 +830,9 @@ void vm_unmap_aliases(void)
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;
@@ -942,6 +947,8 @@ void __init vmalloc_init(void)
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}

	vmap_initialized = true;
}

void unmap_kernel_range(unsigned long addr, unsigned long size)