
Commit fe3f2053 authored by Linus Torvalds
parents 7fc7e2ee ef969434
+1 −1
@@ -227,7 +227,7 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-32)"
+	int "Maximum number of CPUs (2-128)"
 	range 2 128
 	depends on SMP
 	default "32" if PPC64
+9 −1
@@ -102,7 +102,15 @@ int boot_cpuid_phys = 0;
 dev_t boot_dev;
 u64 ppc64_pft_size;
 
-struct ppc64_caches ppc64_caches;
+/* Pick defaults since we might want to patch instructions
+ * before we've read this from the device tree.
+ */
+struct ppc64_caches ppc64_caches = {
+	.dline_size = 0x80,
+	.log_dline_size = 7,
+	.iline_size = 0x80,
+	.log_iline_size = 7
+};
 EXPORT_SYMBOL_GPL(ppc64_caches);
 
 /*
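The hunk above gives ppc64_caches build-time defaults (128-byte cache lines, log2 = 7) so that early boot code which patches instructions can use the cache geometry before the real values are read from the device tree. A minimal userspace sketch of why an implicitly zeroed struct would be dangerous; only the field names and default values mirror the patch, the flush helper is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Field names follow the kernel struct; everything else is illustrative. */
struct ppc64_caches {
	uint32_t dline_size;      /* data cache line size in bytes */
	uint32_t log_dline_size;  /* log2(dline_size) */
	uint32_t iline_size;      /* instruction cache line size */
	uint32_t log_iline_size;
};

/* Defaults as in the patch: 0x80 bytes == 1 << 7. */
static struct ppc64_caches ppc64_caches = {
	.dline_size = 0x80,
	.log_dline_size = 7,
	.iline_size = 0x80,
	.log_iline_size = 7
};

/* Hypothetical flush-by-line walk: with a zeroed struct the step would
 * be 0 and the loop would never advance. */
static void touch_range_by_line(uintptr_t start, uintptr_t stop)
{
	uintptr_t line = ppc64_caches.dline_size;
	uintptr_t a;

	for (a = start & ~(line - 1); a < stop; a += line)
		printf("cache line at %#lx\n", (unsigned long)a);
}

int main(void)
{
	touch_range_by_line(0x1000, 0x1200);  /* four 128-byte lines */
	return 0;
}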
+1 −1
@@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	/* Handle hugepage regions */
 	if (unlikely(in_hugepage_area(mm->context, ea))) {
 		DBG_LOW(" -> huge page !\n");
-		return hash_huge_page(mm, access, ea, vsid, local);
+		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
 
 	/* Get PTE and page size from page tables */
+77 −18
@@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
+struct slb_flush_info {
+	struct mm_struct *mm;
+	u16 newareas;
+};
+
 static void flush_low_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i;
 
-	asm volatile("isync" : : : "memory");
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
+
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
 
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		asm volatile("slbie %0"
 			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
 static void flush_high_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i, j;
 
-	asm volatile("isync" : : : "memory");
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
 
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
+
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
+
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_HIGH_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
 				     :: "r" (((i << HTLB_AREA_SHIFT)
 					      + (j << SID_SHIFT)) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
@@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
 static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
 	unsigned long i;
+	struct slb_flush_info fi;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
 	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
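The rewrite above fixes two problems in the cross-CPU SLB flush. The u16 bitmask used to be smuggled through the void * argument of on_each_cpu(), and every CPU executed the slbie loop even when it was not running the affected mm; now an slb_flush_info carries both the bitmask and the owning mm, so each CPU can bail out early and, when it does match, refresh its paca copy of the context first. A compilable userspace analogy of the pattern, with hypothetical stand-ins for mm_struct and the per-CPU active mm:

#include <stdint.h>
#include <stdio.h>

struct ctx { int id; };                 /* stands in for mm_struct */

struct slb_flush_info {                 /* same shape as in the patch */
	struct ctx *mm;
	uint16_t newareas;
};

static struct ctx *active_mm;           /* stands in for current->active_mm */

static void flush_low_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned int i;

	/* Only need to do anything if this CPU runs the mm that changed. */
	if (active_mm != fi->mm)
		return;

	for (i = 0; i < 16; i++)
		if (fi->newareas & (1U << i))
			printf("slbie segment %u\n", i);
}

int main(void)
{
	struct ctx a = { 1 }, b = { 2 };
	struct slb_flush_info fi = { .mm = &a, .newareas = 0x5 };

	active_mm = &b;
	flush_low_segments(&fi);   /* different mm: returns immediately */

	active_mm = &a;
	flush_low_segments(&fi);   /* flushes segments 0 and 2 */
	return 0;
}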
@@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 
 	mm->context.low_htlb_areas |= newareas;
 
-	/* update the paca copy of the context struct */
-	get_paca()->context = mm->context;
-
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_low_segments, &fi, 0, 1);
 
 	return 0;
 }
 
 static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
+	struct slb_flush_info fi;
 	unsigned long i;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
@@ -280,7 +302,10 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_high_segments, &fi, 0, 1);
 
 	return 0;
 }
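In both callers fi now lives on the caller's stack, which is safe only because the last argument to on_each_cpu() is 1 (wait): the call does not return until every CPU has run the handler, so no remote CPU can still be dereferencing &fi after fi goes out of scope.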
@@ -639,8 +664,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
+/*
+ * Called by asm hashtable.S for doing lazy icache flush
+ */
+static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
+						  pte_t pte, int trap)
+{
+	struct page *page;
+	int i;
+
+	if (!pfn_valid(pte_pfn(pte)))
+		return rflags;
+
+	page = pte_page(pte);
+
+	/* page is dirty */
+	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+		if (trap == 0x400) {
+			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
+				__flush_dcache_icache(page_address(page+i));
+			set_bit(PG_arch_1, &page->flags);
+		} else {
+			rflags |= HPTE_R_N;
+		}
+	}
+	return rflags;
+}
+
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
-		   unsigned long ea, unsigned long vsid, int local)
+		   unsigned long ea, unsigned long vsid, int local,
+		   unsigned long trap)
 {
 	pte_t *ptep;
 	unsigned long old_pte, new_pte;
@@ -691,6 +744,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	rflags = 0x2 | (!(new_pte & _PAGE_RW));
 	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
 	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		/* No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case */
+		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
+						       trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
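hash_huge_page_do_lazy_icache() brings lazy icache flushing to the hugepage fault path on CPUs without a coherent icache, which is why the new trap parameter is threaded through hash_page() and hash_huge_page(). PG_arch_1 on the struct page records whether the icache is in sync: on an instruction storage interrupt (trap == 0x400) the huge page is flushed one base page at a time and marked clean, while on any other fault the HPTE is simply made no-execute (HPTE_R_N), deferring the flush until the page is actually executed. The hunk below additionally passes mmu_huge_psize instead of a hardcoded 1 to ppc_md.hpte_updatepp(), so the update uses the huge page size. A quick check of the flush-loop bound, assuming the usual 16MB huge pages over a 4K base page (the real constants come from the kernel configuration):

#include <stdio.h>

/* Assumed sizes for illustration only. */
#define HPAGE_SIZE (16UL * 1024 * 1024)
#define PAGE_SIZE  (4UL * 1024)

int main(void)
{
	/* The lazy-flush loop runs __flush_dcache_icache() once per base
	 * page covered by the huge page: */
	printf("%lu flushes per executable hugepage fault\n",
	       HPAGE_SIZE / PAGE_SIZE);   /* prints 4096 */
	return 0;
}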
@@ -703,7 +761,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
-		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
+		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
+					 local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 
+1 −1
@@ -125,7 +125,7 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,
 
 	/* We didnt find a matching region, return start/end as 0 */
 	if (*start_pfn == -1UL)
-		start_pfn = 0;
+		*start_pfn = 0;
 }
 
 static inline void map_cpu_to_node(int cpu, int node)
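The one-character numa.c fix repairs a classic C slip: start_pfn = 0 assigned to the local pointer parameter, nulling the function's own copy while leaving the caller's variable at -1UL, whereas *start_pfn = 0 writes through the pointer as intended. A minimal reproduction:

#include <stdio.h>

/* Buggy: assigns to the parameter itself; the caller sees nothing. */
static void clear_broken(unsigned long *start_pfn)
{
	start_pfn = 0;     /* only nulls the local copy of the pointer */
}

/* Fixed, as in the patch: writes through the pointer. */
static void clear_fixed(unsigned long *start_pfn)
{
	*start_pfn = 0;
}

int main(void)
{
	unsigned long pfn = -1UL;

	clear_broken(&pfn);
	printf("after broken: %lx\n", pfn);  /* still all ones */

	clear_fixed(&pfn);
	printf("after fixed:  %lx\n", pfn);  /* 0 */
	return 0;
}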