
Commit 2fea7f6c authored by Will Deacon, committed by Catalin Marinas

arm64: vdso: move to _install_special_mapping and remove arch_vma_name

_install_special_mapping replaces install_special_mapping and removes
the need to detect special VMAs in arch_vma_name.

This patch moves the vdso and compat vectors page code over to the new
API.

Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 87154938
arch/arm64/kernel/vdso.c: +35 −45
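For context before the diff: the two APIs differ in what the caller passes and what it gets back. The declarations below are paraphrased from include/linux/mm.h of this era (v3.16+, where _install_special_mapping was introduced); they are a reference sketch, not part of this patch.

/*
 * Old API: the caller passes a bare page array, and each arch must
 * implement arch_vma_name() to label the VMA in /proc/<pid>/maps.
 */
int install_special_mapping(struct mm_struct *mm, unsigned long addr,
			    unsigned long len, unsigned long vm_flags,
			    struct page **pages);

/*
 * New API: the name and pages travel together in a long-lived struct,
 * and the core mm code labels the VMA from .name itself, so
 * arch_vma_name() becomes unnecessary. Note the return type: a VMA
 * pointer or an ERR_PTR() value instead of an int.
 */
struct vm_special_mapping {
	const char *name;	/* e.g. "[vdso]", shown in /proc/<pid>/maps */
	struct page **pages;
};

struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				unsigned long addr, unsigned long len,
				unsigned long vm_flags,
				const struct vm_special_mapping *spec);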
@@ -88,22 +88,29 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = AARCH32_VECTORS_BASE;
-	int ret;
+	static struct vm_special_mapping spec = {
+		.name	= "[vectors]",
+		.pages	= vectors_page,
+
+	};
+	void *ret;
 
 	down_write(&mm->mmap_sem);
 	current->mm->context.vdso = (void *)addr;
 
 	/* Map vectors page at the high address. */
-	ret = install_special_mapping(mm, addr, PAGE_SIZE,
-				      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
-				      vectors_page);
+	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
+				       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+				       &spec);
 
 	up_write(&mm->mmap_sem);
 
-	return ret;
+	return PTR_ERR_OR_ZERO(ret);
 }
 #endif /* CONFIG_COMPAT */
 
+static struct vm_special_mapping vdso_spec[2];
+
 static int __init vdso_init(void)
 {
 	int i;
@@ -130,6 +137,17 @@ static int __init vdso_init(void)
 	/* Grab the vDSO data page. */
 	vdso_pagelist[i] = virt_to_page(vdso_data);
 
+	/* Populate the special mapping structures */
+	vdso_spec[0] = (struct vm_special_mapping) {
+		.name	= "[vdso]",
+		.pages	= vdso_pagelist,
+	};
+
+	vdso_spec[1] = (struct vm_special_mapping) {
+		.name	= "[vvar]",
+		.pages	= vdso_pagelist + vdso_pages,
+	};
+
 	return 0;
 }
 arch_initcall(vdso_init);
@@ -139,7 +157,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
-	int ret;
+	void *ret;
 
 	vdso_text_len = vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
@@ -148,23 +166,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
-		ret = vdso_base;
+		ret = ERR_PTR(vdso_base);
 		goto up_fail;
 	}
 	mm->context.vdso = (void *)vdso_base;
 
-	ret = install_special_mapping(mm, vdso_base, vdso_text_len,
-				      VM_READ|VM_EXEC|
-				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				      vdso_pagelist);
-	if (ret)
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &vdso_spec[0]);
+	if (IS_ERR(ret))
 		goto up_fail;
 
 	vdso_base += vdso_text_len;
-	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
-				      VM_READ|VM_MAYREAD,
-				      vdso_pagelist + vdso_pages);
-	if (ret)
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
+				       &vdso_spec[1]);
+	if (IS_ERR(ret))
 		goto up_fail;
 
 	up_write(&mm->mmap_sem);
@@ -173,35 +191,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 up_fail:
 	mm->context.vdso = NULL;
 	up_write(&mm->mmap_sem);
-	return ret;
+	return PTR_ERR(ret);
 }
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	unsigned long vdso_text;
-
-	if (!vma->vm_mm)
-		return NULL;
-
-	vdso_text = (unsigned long)vma->vm_mm->context.vdso;
-
-	/*
-	 * We can re-use the vdso pointer in mm_context_t for identifying
-	 * the vectors page for compat applications. The vDSO will always
-	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
-	 * it conflicting with the vectors base.
-	 */
-	if (vma->vm_start == vdso_text) {
-#ifdef CONFIG_COMPAT
-		if (vma->vm_start == AARCH32_VECTORS_BASE)
-			return "[vectors]";
-#endif
-		return "[vdso]";
-	} else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
-		return "[vvar]";
-	}
-
-	return NULL;
-}
 
 /*
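A closing note on the error handling: because _install_special_mapping returns a VMA pointer or an ERR_PTR()-encoded errno rather than an int, the patch changes ret to void *, tests failure with IS_ERR(), and converts back with PTR_ERR()/PTR_ERR_OR_ZERO(). A minimal sketch of that convention follows, using the helpers from include/linux/err.h; the wrapper function itself is illustrative, not from the patch.

#include <linux/err.h>
#include <linux/mm.h>

/* Hypothetical helper showing the ERR_PTR idiom used above. */
static int map_one_page(struct mm_struct *mm, unsigned long addr,
			const struct vm_special_mapping *spec)
{
	void *ret;

	/* Valid pointer on success, ERR_PTR(-Exxx) on failure. */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ|VM_MAYREAD, spec);

	if (IS_ERR(ret))		/* true iff ret encodes an errno  */
		return PTR_ERR(ret);	/* decode back to a negative int  */

	return 0;			/* equivalent: PTR_ERR_OR_ZERO(ret) */
}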