From: Arvind Sankar <nivedita@alum.mit.edu>
[ Upstream commit 41d90b0c1108d1e46c48cf79964636c553844f4c ]
Commit
17054f492dfd ("efi/x86: Implement mixed mode boot without the handover protocol")
introduced a new entry point for the EFI stub to be booted in mixed mode on 32-bit firmware.
When entered via efi32_pe_entry, control is first transferred to startup_32 to set up for the switch to long mode, and then the EFI stub proper is entered via efi_pe_entry. efi_pe_entry is an MS ABI function, and the ABI requires 32 bytes of shadow stack space to be allocated by the caller, as well as the stack being aligned to 8 mod 16 on entry.
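For illustration only (not part of this patch), a minimal sketch of how an ordinary 64-bit MS ABI call site satisfies both requirements; efi_func is a hypothetical callee and the register choices are just an example:

	/* %rsp is 16-byte aligned here */
	subq	$32, %rsp	/* 32 bytes of shadow space for the callee */
	movq	%rdi, %rcx	/* first argument goes in %rcx (MS ABI) */
	movq	%rsi, %rdx	/* second argument goes in %rdx */
	call	efi_func	/* pushes an 8-byte return address, so the
				 * callee sees %rsp == 8 (mod 16) */
	addq	$32, %rsp	/* release the shadow space */

The mixed mode path cannot use a call instruction here, since control reaches efi_pe_entry via the far return in startup_32, so the patch reserves the equivalent stack space by hand.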
Allocate 40 bytes on the stack before switching to 64-bit mode when calling efi_pe_entry to account for this.
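To see why 40 bytes also yields the correct alignment (assuming boot_stack_end is 16-byte aligned, which the next paragraph makes explicit):

  - %esp starts at boot_stack_end, i.e. 0 (mod 16)
  - subl $40, %esp leaves %esp at 8 (mod 16): 32 bytes of shadow space plus
    8 bytes standing in for the return address slot
  - the two 4-byte pushes of $__KERNEL_CS and the entry address bring %esp
    back to 0 (mod 16)
  - the far return pops those 8 bytes again, so efi_pe_entry begins with the
    stack pointer at 8 (mod 16), exactly as if it had been reached by a call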
For robustness, explicitly align boot_stack_end to 16 bytes. It is currently only implicitly aligned, since .bss is cacheline-size aligned, head_64.o is the first object file with a .bss section, and the heap and boot stack sizes are aligned.
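As a toy illustration of what the trailing .balign buys (a standalone GAS fragment with hypothetical symbols, not taken from the kernel):

	.bss
	.balign 16		/* start aligned, like the cacheline-aligned .bss */
stack_bottom:
	.fill 100, 1, 0		/* 100 is not a multiple of 16 ... */
	.balign 16		/* ... so pad until the end label is 16-byte aligned */
stack_top:

Without the second .balign, stack_top would only be as aligned as the fill size happens to leave it.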
Fixes: 17054f492dfd ("efi/x86: Implement mixed mode boot without the handover protocol")
Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
Link: https://lore.kernel.org/r/20200617131957.2507632-1-nivedita@alum.mit.edu
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/boot/compressed/head_64.S | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 76d1d64d51e38..41f7922086220 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -213,7 +213,6 @@ SYM_FUNC_START(startup_32)
 	 * We place all of the values on our mini stack so lret can
 	 * used to perform that far jump.
 	 */
-	pushl	$__KERNEL_CS
 	leal	startup_64(%ebp), %eax
 #ifdef CONFIG_EFI_MIXED
 	movl	efi32_boot_args(%ebp), %edi
@@ -224,11 +223,20 @@ SYM_FUNC_START(startup_32)
 	movl	efi32_boot_args+8(%ebp), %edx	// saved bootparams pointer
 	cmpl	$0, %edx
 	jnz	1f
+	/*
+	 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
+	 * shadow space on the stack even if all arguments are passed in
+	 * registers. We also need an additional 8 bytes for the space that
+	 * would be occupied by the return address, and this also results in
+	 * the correct stack alignment for entry.
+	 */
+	subl	$40, %esp
 	leal	efi_pe_entry(%ebp), %eax
 	movl	%edi, %ecx			// MS calling convention
 	movl	%esi, %edx
 1:
 #endif
+	pushl	$__KERNEL_CS
 	pushl	%eax
 
 	/* Enter paged protected Mode, activating Long Mode */
@@ -776,6 +784,7 @@ SYM_DATA_LOCAL(boot_heap,	.fill BOOT_HEAP_SIZE, 1, 0)
 
 SYM_DATA_START_LOCAL(boot_stack)
 	.fill BOOT_STACK_SIZE, 1, 0
+	.balign 16
 SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)
 
 /*