
Commit 481f75c0 authored by Dave Young, committed by Matt Fleming

x86/efi: Cleanup efi_enter_virtual_mode() function



Add two small functions, efi_merge_regions() and efi_map_regions(), and
have efi_enter_virtual_mode() call them instead of embedding two long
for loops.

Signed-off-by: Dave Young <dyoung@redhat.com>
Acked-by: Borislav Petkov <bp@suse.de>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
parent a7f84f03
+79 −53
@@ -773,44 +773,12 @@ void __init old_map_region(efi_memory_desc_t *md)
 		       (unsigned long long)md->phys_addr);
 }
 
-/*
- * This function will switch the EFI runtime services to virtual mode.
- * Essentially, we look through the EFI memmap and map every region that
- * has the runtime attribute bit set in its memory descriptor into the
- * ->trampoline_pgd page table using a top-down VA allocation scheme.
- *
- * The old method which used to update that memory descriptor with the
- * virtual address obtained from ioremap() is still supported when the
- * kernel is booted with efi=old_map on its command line. Same old
- * method enabled the runtime services to be called without having to
- * thunk back into physical mode for every invocation.
- *
- * The new method does a pagetable switch in a preemption-safe manner
- * so that we're in a different address space when calling a runtime
- * function. For function arguments passing we do copy the PGDs of the
- * kernel page table into ->trampoline_pgd prior to each call.
- */
-void __init efi_enter_virtual_mode(void)
+/* Merge contiguous regions of the same type and attribute */
+static void __init efi_merge_regions(void)
 {
+	void *p;
 	efi_memory_desc_t *md, *prev_md = NULL;
-	void *p, *new_memmap = NULL;
-	unsigned long size;
-	efi_status_t status;
-	u64 end, systab;
-	int count = 0;
-
-	efi.systab = NULL;
-
-	/*
-	 * We don't do virtual mode, since we don't do runtime services, on
-	 * non-native EFI
-	 */
-	if (!efi_is_native()) {
-		efi_unmap_memmap();
-		return;
-	}
 
-	/* Merge contiguous regions of the same type and attribute */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		u64 prev_size;
 		md = p;
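To make the "merge contiguous regions of the same type and attribute" step concrete, here is a small standalone C sketch. The struct, the names (struct region, merge_regions, PAGE_SHIFT) and the merge condition (physically adjacent descriptors with matching type and attribute) are illustrative stand-ins inferred from the comment above, not copied from the kernel loop, whose body is unchanged by this patch and therefore not shown in the diff.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* stand-in for EFI_PAGE_SHIFT */

/* simplified stand-in for efi_memory_desc_t */
struct region {
	uint32_t type;
	uint64_t attribute;
	uint64_t phys_addr;
	uint64_t num_pages;
};

/* Merge adjacent entries of the same type and attribute in place;
 * returns the new number of entries. */
static size_t merge_regions(struct region *r, size_t n)
{
	size_t out = 0;

	for (size_t i = 0; i < n; i++) {
		struct region *prev = out ? &r[out - 1] : NULL;

		if (prev) {
			uint64_t prev_end = prev->phys_addr +
					    (prev->num_pages << PAGE_SHIFT);

			if (prev->type == r[i].type &&
			    prev->attribute == r[i].attribute &&
			    prev_end == r[i].phys_addr) {
				prev->num_pages += r[i].num_pages;
				continue;	/* folded into prev */
			}
		}
		r[out++] = r[i];	/* keep as a separate region */
	}
	return out;
}

int main(void)
{
	struct region map[] = {
		{ .type = 1, .attribute = 0x8, .phys_addr = 0x1000, .num_pages = 2 },
		{ .type = 1, .attribute = 0x8, .phys_addr = 0x3000, .num_pages = 4 }, /* adjacent: merges */
		{ .type = 2, .attribute = 0x8, .phys_addr = 0x7000, .num_pages = 1 }, /* different type: kept */
	};
	size_t n = merge_regions(map, 3);

	for (size_t i = 0; i < n; i++)
		printf("type=%u phys=%#llx pages=%llu\n", map[i].type,
		       (unsigned long long)map[i].phys_addr,
		       (unsigned long long)map[i].num_pages);
	return 0;
}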
@@ -835,8 +803,31 @@ void __init efi_enter_virtual_mode(void)
 			continue;
 		}
 		prev_md = md;
 	}
+}
+
+static void __init get_systab_virt_addr(efi_memory_desc_t *md)
+{
+	unsigned long size;
+	u64 end, systab;
+
+	size = md->num_pages << EFI_PAGE_SHIFT;
+	end = md->phys_addr + size;
+	systab = (u64)(unsigned long)efi_phys.systab;
+	if (md->phys_addr <= systab && systab < end) {
+		systab += md->virt_addr - md->phys_addr;
+		efi.systab = (efi_system_table_t *)(unsigned long)systab;
+	}
+}
+
+/*
+ * Map efi memory ranges for runtime serivce and update new_memmap with virtual
+ * addresses.
+ */
+static void * __init efi_map_regions(int *count)
+{
+	efi_memory_desc_t *md;
+	void *p, *tmp, *new_memmap = NULL;
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
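The new get_systab_virt_addr() helper above only does a range check plus an offset: if the physical address of the system table falls inside a descriptor's range, it is rebased by that descriptor's virt_addr - phys_addr delta. A minimal user-space sketch of that arithmetic, with illustrative names (struct desc, phys_to_virt_in) standing in for the kernel types:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* stand-in for EFI_PAGE_SHIFT */

/* simplified stand-in for the fields get_systab_virt_addr() looks at */
struct desc {
	uint64_t phys_addr;
	uint64_t virt_addr;
	uint64_t num_pages;
};

/*
 * If the physical address 'phys' lies inside the region described by 'md',
 * return its virtual address by applying the region's phys->virt offset;
 * otherwise return 0 to signal "not in this region".
 */
static uint64_t phys_to_virt_in(const struct desc *md, uint64_t phys)
{
	uint64_t size = md->num_pages << PAGE_SHIFT;
	uint64_t end  = md->phys_addr + size;

	if (md->phys_addr <= phys && phys < end)
		return phys + (md->virt_addr - md->phys_addr);
	return 0;
}

int main(void)
{
	struct desc md = {
		.phys_addr = 0x7f000000,
		.virt_addr = 0xffffffefc0000000ULL,
		.num_pages = 16,		/* a 64 KiB region */
	};
	uint64_t table_phys = 0x7f003130;	/* made-up address inside the region */

	printf("virt = %#llx\n",
	       (unsigned long long)phys_to_virt_in(&md, table_phys));
	return 0;
}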
@@ -849,26 +840,64 @@ void __init efi_enter_virtual_mode(void)
 		}
 
 		efi_map_region(md);
+		get_systab_virt_addr(md);
 
-		size = md->num_pages << EFI_PAGE_SHIFT;
-		end = md->phys_addr + size;
-
-		systab = (u64) (unsigned long) efi_phys.systab;
-		if (md->phys_addr <= systab && systab < end) {
-			systab += md->virt_addr - md->phys_addr;
-			efi.systab = (efi_system_table_t *) (unsigned long) systab;
-		}
-
-		new_memmap = krealloc(new_memmap,
-				      (count + 1) * memmap.desc_size,
-				      GFP_KERNEL);
-		if (!new_memmap)
-			goto err_out;
-
-		memcpy(new_memmap + (count * memmap.desc_size), md,
+		tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size,
+			       GFP_KERNEL);
+		if (!tmp)
+			goto out_krealloc;
+		new_memmap = tmp;
+		memcpy(new_memmap + (*count * memmap.desc_size), md,
 		       memmap.desc_size);
-		count++;
+		(*count)++;
 	}
 
+	return new_memmap;
+out_krealloc:
+	kfree(new_memmap);
+	return NULL;
+}
+
+/*
+ * This function will switch the EFI runtime services to virtual mode.
+ * Essentially, we look through the EFI memmap and map every region that
+ * has the runtime attribute bit set in its memory descriptor into the
+ * ->trampoline_pgd page table using a top-down VA allocation scheme.
+ *
+ * The old method which used to update that memory descriptor with the
+ * virtual address obtained from ioremap() is still supported when the
+ * kernel is booted with efi=old_map on its command line. Same old
+ * method enabled the runtime services to be called without having to
+ * thunk back into physical mode for every invocation.
+ *
+ * The new method does a pagetable switch in a preemption-safe manner
+ * so that we're in a different address space when calling a runtime
+ * function. For function arguments passing we do copy the PGDs of the
+ * kernel page table into ->trampoline_pgd prior to each call.
+ */
+void __init efi_enter_virtual_mode(void)
+{
+	efi_status_t status;
+	void *new_memmap = NULL;
+	int count = 0;
+
+	efi.systab = NULL;
+
+	/*
+	 * We don't do virtual mode, since we don't do runtime services, on
+	 * non-native EFI
+	 */
+	if (!efi_is_native()) {
+		efi_unmap_memmap();
+		return;
+	}
+
+	efi_merge_regions();
+
+	new_memmap = efi_map_regions(&count);
+	if (!new_memmap) {
+		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
+		return;
+	}
+
 	BUG_ON(!efi.systab);
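One detail worth noting in the new efi_map_regions() above: krealloc()'s result goes into a temporary, and new_memmap is only updated on success, so the buffer built so far can still be freed via out_krealloc if an allocation fails, whereas the old "new_memmap = krealloc(new_memmap, ...)" pattern would have lost it. A minimal user-space sketch of the same grow-and-copy pattern, with realloc() standing in for krealloc() and the hypothetical helper append_record() playing the role of the loop body:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Append one fixed-size record to a growing buffer, the way
 * efi_map_regions() appends memory descriptors to new_memmap.
 * Returns the (possibly moved) buffer, or NULL on failure while
 * leaving *count untouched and freeing nothing the caller still owns.
 */
static void *append_record(void *buf, size_t *count, size_t rec_size,
			   const void *rec)
{
	/* realloc into a temporary so 'buf' survives a failed allocation */
	void *tmp = realloc(buf, (*count + 1) * rec_size);

	if (!tmp)
		return NULL;	/* caller still owns and can free 'buf' */

	memcpy((char *)tmp + *count * rec_size, rec, rec_size);
	(*count)++;
	return tmp;
}

int main(void)
{
	size_t count = 0;
	int rec, *buf = NULL;

	for (rec = 0; rec < 4; rec++) {
		void *tmp = append_record(buf, &count, sizeof(rec), &rec);

		if (!tmp) {		/* mirrors the out_krealloc error path */
			free(buf);
			return 1;
		}
		buf = tmp;
	}

	for (size_t i = 0; i < count; i++)
		printf("%d ", buf[i]);
	printf("\n");

	free(buf);
	return 0;
}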
@@ -922,9 +951,6 @@ void __init efi_enter_virtual_mode(void)
 			 0, NULL);
 
 	return;
-
- err_out:
-	pr_err("Error reallocating memory, EFI runtime non-functional!\n");
 }
 
 /*