Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 046f1533 authored by Linus Torvalds

Merge branch 'x86-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next

Pull x86 EFI updates from Peter Anvin:
 "A collection of EFI changes.  The perhaps most important one is to
  fully save and restore the FPU state around each invocation of EFI
  runtime, and to not choke on non-ASCII characters in the boot stub"

* 'x86-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  efivars: Add compatibility code for compat tasks
  efivars: Refactor sanity checking code into separate function
  efivars: Stop passing a struct argument to efivar_validate()
  efivars: Check size of user object
  efivars: Use local variables instead of a pointer dereference
  x86/efi: Save and restore FPU context around efi_calls (i386)
  x86/efi: Save and restore FPU context around efi_calls (x86_64)
  x86/efi: Implement a __efi_call_virt macro
  x86, fpu: Extend the use of static_cpu_has_safe
  x86/efi: Delete most of the efi_call* macros
  efi: x86: Handle arbitrary Unicode characters
  efi: Add get_dram_base() helper function
  efi: Add shared printk wrapper for consistent prefixing
  efi: create memory map iteration helper
  efi: efi-stub-helper cleanup
parents a0abcf2e e33655a3
Loading
Loading
Loading
Loading
+1 −2
(diff view: original lines and changed lines shown side by side)
@@ -1087,8 +1087,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
	hdr->type_of_loader = 0x21;
	hdr->type_of_loader = 0x21;


	/* Convert unicode cmdline to ascii */
	/* Convert unicode cmdline to ascii */
	cmdline_ptr = efi_convert_cmdline_to_ascii(sys_table, image,
	cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
						   &options_size);
	if (!cmdline_ptr)
	if (!cmdline_ptr)
		goto fail;
		goto fail;
	hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
	hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
+1 −1
Original line number Original line Diff line number Diff line
@@ -452,7 +452,7 @@ efi32_config:
	.global efi64_config
	.global efi64_config
efi64_config:
efi64_config:
	.fill	11,8,0
	.fill	11,8,0
	.quad	efi_call6
	.quad	efi_call
	.byte	1
	.byte	1
#endif /* CONFIG_EFI_STUB */
#endif /* CONFIG_EFI_STUB */


+34 −66
Original line number Original line Diff line number Diff line
#ifndef _ASM_X86_EFI_H
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H
#define _ASM_X86_EFI_H


#include <asm/i387.h>
/*
/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * with preserved alignment on virtual addresses starting from -4G down
@@ -27,91 +28,58 @@


extern unsigned long asmlinkage efi_call_phys(void *, ...);
extern unsigned long asmlinkage efi_call_phys(void *, ...);


#define efi_call_phys0(f)		efi_call_phys(f)
#define efi_call_phys1(f, a1)		efi_call_phys(f, a1)
#define efi_call_phys2(f, a1, a2)	efi_call_phys(f, a1, a2)
#define efi_call_phys3(f, a1, a2, a3)	efi_call_phys(f, a1, a2, a3)
#define efi_call_phys4(f, a1, a2, a3, a4)	\
	efi_call_phys(f, a1, a2, a3, a4)
#define efi_call_phys5(f, a1, a2, a3, a4, a5)	\
	efi_call_phys(f, a1, a2, a3, a4, a5)
#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6)	\
	efi_call_phys(f, a1, a2, a3, a4, a5, a6)
/*
/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */
 */


/* Use this macro if your virtual returns a non-void value */
#define efi_call_virt(f, args...) \
#define efi_call_virt(f, args...) \
	((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
({									\

	efi_status_t __s;						\
#define efi_call_virt0(f)		efi_call_virt(f)
	kernel_fpu_begin();						\
#define efi_call_virt1(f, a1)		efi_call_virt(f, a1)
	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
#define efi_call_virt2(f, a1, a2)	efi_call_virt(f, a1, a2)
		efi.systab->runtime->f)(args);				\
#define efi_call_virt3(f, a1, a2, a3)	efi_call_virt(f, a1, a2, a3)
	kernel_fpu_end();						\
#define efi_call_virt4(f, a1, a2, a3, a4)	\
	__s;								\
	efi_call_virt(f, a1, a2, a3, a4)
})
#define efi_call_virt5(f, a1, a2, a3, a4, a5)	\

	efi_call_virt(f, a1, a2, a3, a4, a5)
/* Use this macro if your virtual call does not return any value */
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
#define __efi_call_virt(f, args...) \
	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
({									\
	kernel_fpu_begin();						\
	((efi_##f##_t __attribute__((regparm(0)))*)			\
		efi.systab->runtime->f)(args);				\
	kernel_fpu_end();						\
})


#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)
#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)


#else /* !CONFIG_X86_32 */
#else /* !CONFIG_X86_32 */


extern u64 efi_call0(void *fp);
#define EFI_LOADER_SIGNATURE	"EL64"
extern u64 efi_call1(void *fp, u64 arg1);

extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
extern u64 asmlinkage efi_call(void *fp, ...);
extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3);

extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4);
#define efi_call_phys(f, args...)		efi_call((f), args)
extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3,

		     u64 arg4, u64 arg5);
#define efi_call_virt(f, ...)						\
extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
		     u64 arg4, u64 arg5, u64 arg6);

#define efi_call_phys0(f)			\
	efi_call0((f))
#define efi_call_phys1(f, a1)			\
	efi_call1((f), (u64)(a1))
#define efi_call_phys2(f, a1, a2)			\
	efi_call2((f), (u64)(a1), (u64)(a2))
#define efi_call_phys3(f, a1, a2, a3)				\
	efi_call3((f), (u64)(a1), (u64)(a2), (u64)(a3))
#define efi_call_phys4(f, a1, a2, a3, a4)				\
	efi_call4((f), (u64)(a1), (u64)(a2), (u64)(a3),		\
		  (u64)(a4))
#define efi_call_phys5(f, a1, a2, a3, a4, a5)				\
	efi_call5((f), (u64)(a1), (u64)(a2), (u64)(a3),		\
		  (u64)(a4), (u64)(a5))
#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6)			\
	efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3),		\
		  (u64)(a4), (u64)(a5), (u64)(a6))

#define _efi_call_virtX(x, f, ...)					\
({									\
({									\
	efi_status_t __s;						\
	efi_status_t __s;						\
									\
									\
	efi_sync_low_kernel_mappings();					\
	efi_sync_low_kernel_mappings();					\
	preempt_disable();						\
	preempt_disable();						\
	__s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__);	\
	__kernel_fpu_begin();						\
	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
	__kernel_fpu_end();						\
	preempt_enable();						\
	preempt_enable();						\
	__s;								\
	__s;								\
})
})


#define efi_call_virt0(f)				\
/*
	_efi_call_virtX(0, f)
 * All X86_64 virt calls return non-void values. Thus, use non-void call for
#define efi_call_virt1(f, a1)				\
 * virt calls that would be void on X86_32.
	_efi_call_virtX(1, f, (u64)(a1))
 */
#define efi_call_virt2(f, a1, a2)			\
#define __efi_call_virt(f, args...) efi_call_virt(f, args)
	_efi_call_virtX(2, f, (u64)(a1), (u64)(a2))
#define efi_call_virt3(f, a1, a2, a3)			\
	_efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3))
#define efi_call_virt4(f, a1, a2, a3, a4)		\
	_efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4))
#define efi_call_virt5(f, a1, a2, a3, a4, a5)		\
	_efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5))
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
	_efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))


extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
				 u32 type, u64 attribute);
				 u32 type, u64 attribute);
+5 −5
Original line number Original line Diff line number Diff line
@@ -87,22 +87,22 @@ static inline int is_x32_frame(void)


static __always_inline __pure bool use_eager_fpu(void)
static __always_inline __pure bool use_eager_fpu(void)
{
{
	return static_cpu_has(X86_FEATURE_EAGER_FPU);
	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}
}


static __always_inline __pure bool use_xsaveopt(void)
static __always_inline __pure bool use_xsaveopt(void)
{
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}
}


static __always_inline __pure bool use_xsave(void)
static __always_inline __pure bool use_xsave(void)
{
{
	return static_cpu_has(X86_FEATURE_XSAVE);
	return static_cpu_has_safe(X86_FEATURE_XSAVE);
}
}


static __always_inline __pure bool use_fxsr(void)
static __always_inline __pure bool use_fxsr(void)
{
{
        return static_cpu_has(X86_FEATURE_FXSR);
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}
}


static inline void fx_finit(struct i387_fxsave_struct *fx)
static inline void fx_finit(struct i387_fxsave_struct *fx)
@@ -293,7 +293,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending.  Clear the x87 state here by setting it to fixed
	   is pending.  Clear the x87 state here by setting it to fixed
	   values. "m" is a random variable that should be in L1 */
	   values. "m" is a random variable that should be in L1 */
	if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
	if (unlikely(static_cpu_has_safe(X86_FEATURE_FXSAVE_LEAK))) {
		asm volatile(
		asm volatile(
			"fnclex\n\t"
			"fnclex\n\t"
			"emms\n\t"
			"emms\n\t"
+23 −25
Original line number Original line Diff line number Diff line
@@ -110,7 +110,7 @@ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
	efi_status_t status;
	efi_status_t status;


	spin_lock_irqsave(&rtc_lock, flags);
	spin_lock_irqsave(&rtc_lock, flags);
	status = efi_call_virt2(get_time, tm, tc);
	status = efi_call_virt(get_time, tm, tc);
	spin_unlock_irqrestore(&rtc_lock, flags);
	spin_unlock_irqrestore(&rtc_lock, flags);
	return status;
	return status;
}
}
@@ -121,7 +121,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
	efi_status_t status;
	efi_status_t status;


	spin_lock_irqsave(&rtc_lock, flags);
	spin_lock_irqsave(&rtc_lock, flags);
	status = efi_call_virt1(set_time, tm);
	status = efi_call_virt(set_time, tm);
	spin_unlock_irqrestore(&rtc_lock, flags);
	spin_unlock_irqrestore(&rtc_lock, flags);
	return status;
	return status;
}
}
@@ -134,8 +134,7 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
	efi_status_t status;
	efi_status_t status;


	spin_lock_irqsave(&rtc_lock, flags);
	spin_lock_irqsave(&rtc_lock, flags);
	status = efi_call_virt3(get_wakeup_time,
	status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
				enabled, pending, tm);
	spin_unlock_irqrestore(&rtc_lock, flags);
	spin_unlock_irqrestore(&rtc_lock, flags);
	return status;
	return status;
}
}
@@ -146,8 +145,7 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
	efi_status_t status;
	efi_status_t status;


	spin_lock_irqsave(&rtc_lock, flags);
	spin_lock_irqsave(&rtc_lock, flags);
	status = efi_call_virt2(set_wakeup_time,
	status = efi_call_virt(set_wakeup_time, enabled, tm);
				enabled, tm);
	spin_unlock_irqrestore(&rtc_lock, flags);
	spin_unlock_irqrestore(&rtc_lock, flags);
	return status;
	return status;
}
}
@@ -158,7 +156,7 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
					  unsigned long *data_size,
					  unsigned long *data_size,
					  void *data)
					  void *data)
{
{
	return efi_call_virt5(get_variable,
	return efi_call_virt(get_variable,
			     name, vendor, attr,
			     name, vendor, attr,
			     data_size, data);
			     data_size, data);
}
}
@@ -167,7 +165,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
					       efi_char16_t *name,
					       efi_char16_t *name,
					       efi_guid_t *vendor)
					       efi_guid_t *vendor)
{
{
	return efi_call_virt3(get_next_variable,
	return efi_call_virt(get_next_variable,
			     name_size, name, vendor);
			     name_size, name, vendor);
}
}


@@ -177,7 +175,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
					  unsigned long data_size,
					  unsigned long data_size,
					  void *data)
					  void *data)
{
{
	return efi_call_virt5(set_variable,
	return efi_call_virt(set_variable,
			     name, vendor, attr,
			     name, vendor, attr,
			     data_size, data);
			     data_size, data);
}
}
@@ -190,13 +188,13 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;
		return EFI_UNSUPPORTED;


	return efi_call_virt4(query_variable_info, attr, storage_space,
	return efi_call_virt(query_variable_info, attr, storage_space,
			     remaining_space, max_variable_size);
			     remaining_space, max_variable_size);
}
}


static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
{
{
	return efi_call_virt1(get_next_high_mono_count, count);
	return efi_call_virt(get_next_high_mono_count, count);
}
}


static void virt_efi_reset_system(int reset_type,
static void virt_efi_reset_system(int reset_type,
@@ -204,7 +202,7 @@ static void virt_efi_reset_system(int reset_type,
				  unsigned long data_size,
				  unsigned long data_size,
				  efi_char16_t *data)
				  efi_char16_t *data)
{
{
	efi_call_virt4(reset_system, reset_type, status,
	__efi_call_virt(reset_system, reset_type, status,
			data_size, data);
			data_size, data);
}
}


@@ -215,7 +213,7 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;
		return EFI_UNSUPPORTED;


	return efi_call_virt3(update_capsule, capsules, count, sg_list);
	return efi_call_virt(update_capsule, capsules, count, sg_list);
}
}


static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
@@ -226,7 +224,7 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;
		return EFI_UNSUPPORTED;


	return efi_call_virt4(query_capsule_caps, capsules, count, max_size,
	return efi_call_virt(query_capsule_caps, capsules, count, max_size,
			     reset_type);
			     reset_type);
}
}


@@ -239,7 +237,7 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
	efi_status_t status;
	efi_status_t status;


	efi_call_phys_prelog();
	efi_call_phys_prelog();
	status = efi_call_phys4(efi_phys.set_virtual_address_map,
	status = efi_call_phys(efi_phys.set_virtual_address_map,
			       memory_map_size, descriptor_size,
			       memory_map_size, descriptor_size,
			       descriptor_version, virtual_map);
			       descriptor_version, virtual_map);
	efi_call_phys_epilog();
	efi_call_phys_epilog();
Loading