Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 869c34f5 authored by Linus Torvalds
Browse files

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: ce4100: Set pci ops via callback instead of module init
  x86/mm: Fix pgd_lock deadlock
  x86/mm: Handle mm_fault_error() in kernel space
  x86: Don't check for BIOS corruption in first 64K when there's no need to
parents 52d3c036 03150171
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
/*
 * Public interface for Intel CE4100 platform PCI setup.
 */
#ifndef _ASM_CE4100_H_
#define _ASM_CE4100_H_

/*
 * Initialize PCI support for the CE4100 platform.
 * NOTE(review): per the merge log this is now invoked via a platform
 * callback rather than a module initcall — return-value semantics are
 * not visible here; presumably 0 on success. Confirm against the caller.
 */
int ce4100_pci_init(void);

#endif
+4 −4
Original line number Diff line number Diff line
@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void)
		addr += size;
	}

	printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
	       num_scan_areas);
	if (num_scan_areas)
		printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
}


@@ -148,7 +148,7 @@ static void check_corruption(struct work_struct *dummy)

static int start_periodic_check_for_corruption(void)
{
	if (!memory_corruption_check || corruption_check_period == 0)
	if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
		return 0;

	printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
+10 −4
Original line number Diff line number Diff line
@@ -229,15 +229,14 @@ void vmalloc_sync_all(void)
	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {

		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
@@ -247,7 +246,7 @@ void vmalloc_sync_all(void)
			if (!ret)
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
		spin_unlock(&pgd_lock);
	}
}

@@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return;
		}

		out_of_memory(regs, error_code, address);
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+3 −3
Original line number Diff line number Diff line
@@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock_irqsave(&pgd_lock, flags);
		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

@@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)

			spin_unlock(pgt_lock);
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
		spin_unlock(&pgd_lock);
	}
}

+8 −10
Original line number Diff line number Diff line
@@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM];

/*
 * Account @pages direct-mapped pages at mapping @level in
 * direct_pages_count[].
 *
 * The scraped diff left both the pre- and post-patch lock calls in the
 * body (spin_lock_irqsave + spin_lock on the same lock), which would
 * self-deadlock; this is the post-commit form, where pgd_lock is a
 * plain spinlock and no IRQ flags are saved ("x86/mm: Fix pgd_lock
 * deadlock").
 */
void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA (change_page_attr) running concurrently */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
@@ -394,7 +392,7 @@ static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot, req_prot;
	int i, do_split = 1;
@@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
	if (cpa->force_split)
		return 1;

	spin_lock_irqsave(&pgd_lock, flags);
	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
@@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);
	spin_unlock(&pgd_lock);

	return do_split;
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned long pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
@@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
@@ -591,7 +589,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
	 */
	if (base)
		__free_page(base);
	spin_unlock_irqrestore(&pgd_lock, flags);
	spin_unlock(&pgd_lock);

	return 0;
}
Loading