
Commit 7a7039ee authored by Chris Metcalf

arch/tile: fix bug in loading kernels larger than 16 MB



Previously we only handled kernels up to a single huge page in size.
Now we create additional PTEs appropriately.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent b230ff2d
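
For context, a minimal standalone sketch of the arithmetic the patch addresses (illustrative only; the 16 MB HPAGE_SIZE value and the helper name huge_pages_needed() are assumptions taken from the commit title, not from the tile sources): a kernel image larger than one huge page spans several PMD entries, so the mapping code must emit one PMD entry (huge-page path) or one L2 PTE page (ktext_small path) per 16 MB slice instead of exactly one.

#include <stdio.h>

/* Illustrative only: 16 MB huge pages, per the commit title. */
#define HPAGE_SIZE (16UL << 20)

/* How many huge-page-sized slices (PMD entries, or L2 PTE pages in the
 * small-page case) a kernel image of `size` bytes spans, rounding up. */
static unsigned long huge_pages_needed(unsigned long size)
{
	return (size + HPAGE_SIZE - 1) / HPAGE_SIZE;
}

int main(void)
{
	printf("%lu\n", huge_pages_needed(14UL << 20)); /* 1: old code sufficed */
	printf("%lu\n", huge_pages_needed(20UL << 20)); /* 2: old code mapped only the first slice */
	return 0;
}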
+15 −6
@@ -557,6 +557,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 
 	address = MEM_SV_INTRPT;
 	pmd = get_pmd(pgtables, address);
+	pfn = 0;  /* code starts at PA 0 */
 	if (ktext_small) {
 		/* Allocate an L2 PTE for the kernel text */
 		int cpu = 0;
@@ -579,10 +580,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		}
 
 		BUG_ON(address != (unsigned long)_stext);
-		pfn = 0;  /* code starts at PA 0 */
-		pte = alloc_pte();
-		for (pte_ofs = 0; address < (unsigned long)_einittext;
-		     pfn++, pte_ofs++, address += PAGE_SIZE) {
+		pte = NULL;
+		for (; address < (unsigned long)_einittext;
+		     pfn++, address += PAGE_SIZE) {
+			pte_ofs = pte_index(address);
+			if (pte_ofs == 0) {
+				if (pte)
+					assign_pte(pmd++, pte);
+				pte = alloc_pte();
+			}
 			if (!ktext_local) {
 				prot = set_remote_cache_cpu(prot, cpu);
 				cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,6 +597,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 			}
 			pte[pte_ofs] = pfn_pte(pfn, prot);
 		}
-		assign_pte(pmd, pte);
+		if (pte)
+			assign_pte(pmd, pte);
 	} else {
 		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
@@ -614,7 +621,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		else
 			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
-		*(pte_t *)pmd = pteval;
+		for (; address < (unsigned long)_einittext;
+		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
+			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
 	}
 
 	/* Set swapper_pgprot here so it is flushed to memory right away. */