Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e0909392 authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Michael Ellerman
Browse files

powerpc/mm: Validate address values against different region limits



This adds an explicit check in various functions.

Signed-off-by: default avatarAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
parent 0034d395
Loading
Loading
Loading
Loading
+15 −3
Original line number Diff line number Diff line
@@ -781,7 +781,14 @@ int resize_hpt_for_hotplug(unsigned long new_mem_size)

int hash__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	int rc = htab_bolt_mapping(start, end, __pa(start),
	int rc;

	if (end >= H_VMALLOC_START) {
		pr_warn("Outisde the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, end, __pa(start),
			       pgprot_val(PAGE_KERNEL), mmu_linear_psize,
			       mmu_kernel_ssize);

@@ -924,6 +931,11 @@ static void __init htab_initialize(void)
		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
		    base, size, prot);

		if ((base + size) >= H_VMALLOC_START) {
			pr_warn("Outisde the supported range\n");
			continue;
		}

		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				prot, mmu_linear_psize, mmu_kernel_ssize));
	}
+10 −3
Original line number Diff line number Diff line
@@ -112,7 +112,14 @@ int __meminit hash__vmemmap_create_mapping(unsigned long start,
				       unsigned long page_size,
				       unsigned long phys)
{
	int rc = htab_bolt_mapping(start, start + page_size, phys,
	int rc;

	if ((start + page_size) >= H_VMEMMAP_END) {
		pr_warn("Outisde the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, start + page_size, phys,
			       pgprot_val(PAGE_KERNEL),
			       mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
+16 −0
Original line number Diff line number Diff line
@@ -339,6 +339,12 @@ void __init radix_init_pgtable(void)
		 * page tables will be allocated within the range. No
		 * need or a node (which we don't have yet).
		 */

		if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
			pr_warn("Outisde the supported range\n");
			continue;
		}

		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size,
						-1));
@@ -895,6 +901,11 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)

/*
 * radix__create_section_mapping - map a hot-plugged memory section into
 * the radix linear mapping.
 * @start: physical-effective start address of the section
 * @end:   end address (exclusive) of the section
 * @nid:   NUMA node the backing memory belongs to
 *
 * The linear map must stay below the vmalloc region; reject any range
 * that would reach RADIX_VMALLOC_START.
 *
 * Returns 0 on success, -1 if the range is outside the supported
 * linear-map window, or the error from create_physical_mapping().
 */
int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	/* Linear mapping cannot extend into the vmalloc region. */
	if (end >= RADIX_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	return create_physical_mapping(start, end, nid);
}

@@ -922,6 +933,11 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	if ((start + page_size) >= RADIX_VMEMMAP_END) {
		pr_warn("Outisde the supported range\n");
		return -1;
	}

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

+5 −0
Original line number Diff line number Diff line
@@ -121,6 +121,11 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	if ((ea + size) >= (void *)IOREMAP_END) {
		pr_warn("Outisde the supported range\n");
		return NULL;
	}

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);