Commit a59e527c authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Merge "msm: Allow lowmem to be non contiguous and mixed"

parents cce002f1 8817c86d
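
In brief: map_lowmem() in arch/arm/mm/mmu.c now counts the lowmem memblock regions, allocates one static_vm entry per bank, and registers each direct-mapped range on the early vmlist with the new VM_LOWMEM flag; add_static_vm_early() in arch/arm/mm/ioremap.c is taught to skip areas that the new mm/vmalloc.c helper vm_area_check_early() reports as already mapped. Together these changes let lowmem consist of non-contiguous, mixed banks without colliding with other early static mappings.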
arch/arm/mm/ioremap.c +2 −1
@@ -91,6 +91,7 @@ void __init add_static_vm_early(struct static_vm *svm)
 	void *vaddr;
 
 	vm = &svm->vm;
-	vm_area_add_early(vm);
+	if (!vm_area_check_early(vm))
+		vm_area_add_early(vm);
 	vaddr = vm->addr;
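
Previously the area was added unconditionally. With map_lowmem() below now registering every lowmem bank as a static vm area, an early caller can present a range that overlaps an existing vmlist entry; the new guard keeps a second, overlapping entry from being inserted.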

arch/arm/mm/mmu.c +38 −2
@@ -1374,12 +1374,21 @@ static void __init map_lowmem(void)
 	struct memblock_region *reg;
 	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
 	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	struct static_vm *svm;
+	phys_addr_t start;
+	phys_addr_t end;
+	unsigned long vaddr;
+	unsigned long pfn;
+	unsigned long length;
+	unsigned int type;
+	int nr = 0;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		phys_addr_t start = reg->base;
-		phys_addr_t end = start + reg->size;
 		struct map_desc map;
+		start = reg->base;
+		end = start + reg->size;
+		nr++;
 
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
@@ -1428,6 +1437,33 @@ static void __init map_lowmem(void)
 			}
 		}
 	}
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+
+	for_each_memblock(memory, reg) {
+		struct vm_struct *vm;
+
+		start = reg->base;
+		end = start + reg->size;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			break;
+
+		vm = &svm->vm;
+		pfn = __phys_to_pfn(start);
+		vaddr = __phys_to_virt(start);
+		length = end - start;
+		type = MT_MEMORY_RW;
+
+		vm->addr = (void *)(vaddr & PAGE_MASK);
+		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(pfn);
+		vm->flags = VM_LOWMEM;
+		vm->flags |= VM_ARM_MTYPE(type);
+		vm->caller = map_lowmem;
+		add_static_vm_early(svm++);
+	}
 }
 
 #ifdef CONFIG_ARM_LPAE
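
A note on the vm->addr / vm->size arithmetic in the second loop above: the virtual start is rounded down to a page boundary and the length rounded up, so the tracked area always covers whole pages even when a bank does not start page-aligned. A minimal userspace sketch of the same computation, assuming 4 KiB pages and made-up addresses (PAGE_MASK and PAGE_ALIGN are redefined here purely for the demo):

#include <stdio.h>
#include <stdint.h>

/* Assumed 4 KiB pages, as on a typical ARM configuration. */
#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Hypothetical bank: 0x1800 bytes starting 0x400 into a page. */
	uintptr_t vaddr = 0xc0000400UL;
	unsigned long length = 0x1800UL;

	uintptr_t addr = vaddr & PAGE_MASK;             /* round start down */
	unsigned long size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK)); /* round end up */

	printf("addr=%#lx size=%#lx\n", (unsigned long)addr, size);
	/* Prints addr=0xc0000000 size=0x2000: two whole pages cover the bank. */
	return 0;
}

The same idiom appears throughout the kernel wherever a byte range must be widened to page granularity.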
include/linux/vmalloc.h +3 −0
@@ -18,6 +18,8 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040      /* don't add guard page */
 #define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */
+#define VM_LOWMEM		0x00000100	/* Tracking of direct mapped lowmem */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -158,6 +160,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
 extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
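
The declaration above pairs with vm_area_add_early() to give boot-time code a check-then-add idiom, exactly as add_static_vm_early() uses it in the first hunk. A sketch of a hypothetical caller follows; the descriptor name, function, addresses, and flag choice are invented for illustration and are not part of this commit:

#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>

/* Hypothetical persistent descriptor for an early framebuffer mapping. */
static struct vm_struct early_fb_vm = {
	.addr      = (void *)0xf0000000,	/* assumed virtual address */
	.size      = SZ_1M,			/* assumed mapping size */
	.phys_addr = 0x8e000000,		/* assumed physical base */
	.flags     = VM_IOREMAP,
};

static void __init register_early_fb(void)
{
	/* Add the area only if nothing on the early vmlist overlaps it. */
	if (!vm_area_check_early(&early_fb_vm))
		vm_area_add_early(&early_fb_vm);
}

Note that the descriptor is not marked __initdata: entries handed to vm_area_add_early() stay linked into the vm list, so the backing storage must persist past boot.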
mm/vmalloc.c +27 −0
@@ -1123,6 +1123,33 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 EXPORT_SYMBOL(vm_map_ram);
 
 static struct vm_struct *vmlist __initdata;
+
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has been
+ * mapped already. @vm->addr, @vm->size and @vm->flags should
+ * contain proper values.
+ *
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+
+	BUG_ON(vmap_initialized);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= vm->addr) {
+			if (tmp->addr < vm->addr + vm->size)
+				return 1;
+		} else {
+			if (tmp->addr + tmp->size > vm->addr)
+				return 1;
+		}
+	}
+	return 0;
+}
 
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
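
The loop in vm_area_check_early() implements the standard half-open interval overlap test: [a, a+s) and [b, b+t) intersect exactly when a < b + t and b < a + s. A self-contained C demonstration of the same case split (the overlaps() helper below is illustrative only, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 1 if [a, a+s) and [b, b+t) share at least one address, else 0. */
static int overlaps(uintptr_t a, size_t s, uintptr_t b, size_t t)
{
	/* Same case split as vm_area_check_early(): compare the starts. */
	if (b >= a)
		return b < a + s;	/* b starts inside [a, a+s) */
	else
		return b + t > a;	/* b starts below a but reaches past it */
}

int main(void)
{
	assert(overlaps(0x1000, 0x1000, 0x1800, 0x100));	/* nested */
	assert(overlaps(0x1000, 0x1000, 0x0800, 0x900));	/* straddles the start */
	assert(!overlaps(0x1000, 0x1000, 0x2000, 0x100));	/* adjacent, no overlap */
	assert(!overlaps(0x1000, 0x1000, 0x0800, 0x200));	/* ends exactly at start */
	puts("all overlap cases behave as in vm_area_check_early()");
	return 0;
}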