This is a note to let you know that I've just added the patch titled
x86/entry/64: Use a per-CPU trampoline stack for IDT entries
to the 4.14-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=su...
The filename of the patch is:
     x86-entry-64-use-a-per-cpu-trampoline-stack-for-idt-entries.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree, please let <stable@vger.kernel.org> know about it.
From 7f2590a110b837af5679d08fc25c6227c5a8c497 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Mon, 4 Dec 2017 15:07:23 +0100
Subject: x86/entry/64: Use a per-CPU trampoline stack for IDT entries

From: Andy Lutomirski <luto@kernel.org>
commit 7f2590a110b837af5679d08fc25c6227c5a8c497 upstream.
Historically, IDT entries from usermode have always gone directly to the running task's kernel stack. Rearrange it so that we enter on a per-CPU trampoline stack and then manually switch to the task's stack. This touches a couple of extra cachelines, but it gives us a chance to run some code before we touch the kernel stack.
The asm isn't exactly beautiful, but I think that fully refactoring it can wait.
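As a rough illustration of the mechanism (an editor's sketch in hypothetical userspace C, not part of the patch; all names and the frame layout are invented, while the real implementation is the switch_to_thread_stack assembly in the diff below, which copies the hardware IRET frame plus orig_ax and reads the per-CPU cpu_current_top_of_stack value):

#include <stdio.h>
#include <string.h>

/* Stand-in for orig_ax plus the five-word hardware IRET frame. */
struct entry_frame {
	unsigned long orig_ax;
	unsigned long ip, cs, flags, sp, ss;
};

/*
 * Copy the small entry frame from the per-CPU trampoline stack to the
 * top of the thread stack and continue there; only the few cachelines
 * holding the frame are touched before the thread stack is used.
 */
static struct entry_frame *switch_stacks(const struct entry_frame *src,
					 unsigned long thread_stack_top)
{
	struct entry_frame *dst = (struct entry_frame *)thread_stack_top - 1;

	memcpy(dst, src, sizeof(*dst));
	return dst;
}

int main(void)
{
	unsigned char trampoline[512], thread_stack[4096];
	struct entry_frame *f =
		(struct entry_frame *)(trampoline + sizeof(trampoline)) - 1;

	memset(f, 0, sizeof(*f));
	f->ip = 0x400000;	/* pretend user RIP */
	f->orig_ax = ~14UL;	/* the IDT stubs push ~vector */

	f = switch_stacks(f, (unsigned long)(thread_stack + sizeof(thread_stack)));
	printf("frame moved to %p, ip=%#lx\n", (void *)f, f->ip);
	return 0;
}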
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Link: https://lkml.kernel.org/r/20171204150606.225330557@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/entry/entry_64.S        |   67 +++++++++++++++++++++++++++++----------
 arch/x86/entry/entry_64_compat.S |    5 ++
 arch/x86/include/asm/switch_to.h |    4 +-
 arch/x86/include/asm/traps.h     |    1 
 arch/x86/kernel/cpu/common.c     |    6 ++-
 arch/x86/kernel/traps.c          |   21 ++++++------
 6 files changed, 72 insertions(+), 32 deletions(-)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -560,6 +560,13 @@ END(irq_entries_start)
 /* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
 	cld
+
+	testb	$3, CS-ORIG_RAX(%rsp)
+	jz	1f
+	SWAPGS
+	call	switch_to_thread_stack
+1:
+
 	ALLOC_PT_GPREGS_ON_STACK
 	SAVE_C_REGS
 	SAVE_EXTRA_REGS
@@ -569,12 +576,8 @@ END(irq_entries_start)
 	jz	1f
 
 	/*
-	 * IRQ from user mode.  Switch to kernel gsbase and inform context
-	 * tracking that we're in kernel mode.
-	 */
-	SWAPGS
-
-	/*
+	 * IRQ from user mode.
+	 *
 	 * We need to tell lockdep that IRQs are off.  We can't do this until
 	 * we fix gsbase, and we should do it before enter_from_user_mode
 	 * (which can take locks).  Since TRACE_IRQS_OFF idempotent,
@@ -828,6 +831,32 @@ apicinterrupt IRQ_WORK_VECTOR		irq_work
  */
 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
 
+/*
+ * Switch to the thread stack.  This is called with the IRET frame and
+ * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
+ * space has not been allocated for them.)
+ */
+ENTRY(switch_to_thread_stack)
+	UNWIND_HINT_FUNC
+
+	pushq	%rdi
+	movq	%rsp, %rdi
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+	UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
+
+	pushq	7*8(%rdi)		/* regs->ss */
+	pushq	6*8(%rdi)		/* regs->rsp */
+	pushq	5*8(%rdi)		/* regs->eflags */
+	pushq	4*8(%rdi)		/* regs->cs */
+	pushq	3*8(%rdi)		/* regs->ip */
+	pushq	2*8(%rdi)		/* regs->orig_ax */
+	pushq	8(%rdi)			/* return address */
+	UNWIND_HINT_FUNC
+
+	movq	(%rdi), %rdi
+	ret
+END(switch_to_thread_stack)
+
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
 	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -845,11 +874,12 @@ ENTRY(\sym)
 
 	ALLOC_PT_GPREGS_ON_STACK
 
-	.if \paranoid
 	.if \paranoid == 1
+	.if \paranoid < 2
 	testb	$3, CS(%rsp)		/* If coming from userspace, switch stacks */
-	jnz	1f
+	jnz	.Lfrom_usermode_switch_stack_\@
 	.endif
+
+	.if \paranoid
 	call	paranoid_entry
 	.else
 	call	error_entry
@@ -891,20 +921,15 @@ ENTRY(\sym)
 	jmp	error_exit
 	.endif
 
-	.if \paranoid == 1
+	.if \paranoid < 2
 	/*
-	 * Paranoid entry from userspace.  Switch stacks and treat it
+	 * Entry from userspace.  Switch stacks and treat it
 	 * as a normal entry.  This means that paranoid handlers
 	 * run in real process context if user_mode(regs).
 	 */
-1:
+.Lfrom_usermode_switch_stack_\@:
 	call	error_entry
 
-
-	movq	%rsp, %rdi			/* pt_regs pointer */
-	call	sync_regs
-	movq	%rax, %rsp			/* switch stack */
-
 	movq	%rsp, %rdi			/* pt_regs pointer */
 
 	.if \has_error_code
@@ -1165,6 +1190,14 @@ ENTRY(error_entry)
 	SWAPGS
 
 .Lerror_entry_from_usermode_after_swapgs:
+	/* Put us onto the real thread stack. */
+	popq	%r12				/* save return addr in %12 */
+	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
+	call	sync_regs
+	movq	%rax, %rsp			/* switch stack */
+	ENCODE_FRAME_POINTER
+	pushq	%r12
+
 	/*
 	 * We need to tell lockdep that IRQs are off.  We can't do this until
 	 * we fix gsbase, and we should do it before enter_from_user_mode
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -306,8 +306,11 @@ ENTRY(entry_INT80_compat)
 	 */
 	movl	%eax, %eax
 
-	/* Construct struct pt_regs on stack (iret frame is already on stack) */
 	pushq	%rax			/* pt_regs->orig_ax */
+
+	/* switch to thread stack expects orig_ax to be pushed */
+	call	switch_to_thread_stack
+
 	pushq	%rdi			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
 	pushq	%rdx			/* pt_regs->dx */
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -90,10 +90,12 @@ static inline void refresh_sysenter_cs(s
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
+	/* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
 #ifdef CONFIG_X86_32
 	load_sp0(task->thread.sp0);
 #else
-	load_sp0(task_top_of_stack(task));
+	if (static_cpu_has(X86_FEATURE_XENPV))
+		load_sp0(task_top_of_stack(task));
 #endif
 }
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_presen
 dotraplinkage void do_stack_segment(struct pt_regs *, long);
 #ifdef CONFIG_X86_64
 dotraplinkage void do_double_fault(struct pt_regs *, long);
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
 #endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1623,11 +1623,13 @@ void cpu_init(void)
 	setup_cpu_entry_area(cpu);
 
 	/*
-	 * Initialize the TSS.  Don't bother initializing sp0, as the initial
-	 * task never enters user mode.
+	 * Initialize the TSS.  sp0 points to the entry trampoline stack
+	 * regardless of what task is running.
 	 */
 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 	load_TR_desc();
+	load_sp0((unsigned long)&get_cpu_entry_area(cpu)->tss +
+		 offsetofend(struct tss_struct, SYSENTER_stack));
 
 	load_mm_ldt(&init_mm);
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -619,14 +619,15 @@ NOKPROBE_SYMBOL(do_int3);
 
 #ifdef CONFIG_X86_64
 /*
- * Help handler running on IST stack to switch off the IST stack if the
- * interrupted code was in user mode. The actual stack switch is done in
- * entry_64.S
+ * Help handler running on a per-cpu (IST or entry trampoline) stack
+ * to switch to the normal thread stack if the interrupted code was in
+ * user mode. The actual stack switch is done in entry_64.S
  */
 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
-	struct pt_regs *regs = task_pt_regs(current);
-	*regs = *eregs;
+	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
+	if (regs != eregs)
+		*regs = *eregs;
 	return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
@@ -642,13 +643,13 @@ struct bad_iret_stack *fixup_bad_iret(st
 	/*
 	 * This is called from entry_64.S early in handling a fault
 	 * caused by a bad iret to user mode.  To handle the fault
-	 * correctly, we want move our stack frame to task_pt_regs
-	 * and we want to pretend that the exception came from the
-	 * iret target.
+	 * correctly, we want to move our stack frame to where it would
+	 * be had we entered directly on the entry stack (rather than
+	 * just below the IRET frame) and we want to pretend that the
+	 * exception came from the IRET target.
 	 */
 	struct bad_iret_stack *new_stack =
-		container_of(task_pt_regs(current),
-			     struct bad_iret_stack, regs);
+		(struct bad_iret_stack *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
 
 	/* Copy the IRET target to the new stack. */
 	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
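A note on the pointer arithmetic above (an editor's illustration, not part of the patch): both the new sync_regs() and fixup_bad_iret() locate the register frame by casting a top-of-stack address and subtracting one, so the structure occupies exactly the highest sizeof(struct pt_regs) bytes of that stack, and the 5*8 passed to memmove() is the five-word hardware IRET frame (ip, cs, flags, sp, ss). A hypothetical standalone C sketch of the idiom, with invented names:

#include <assert.h>

/* Invented stand-in for struct pt_regs; only the sizes matter here. */
struct regs_sketch {
	unsigned long bx, bp, r12, r13, r14, r15;	/* abridged */
	unsigned long ip, cs, flags, sp, ss;		/* 5*8-byte IRET frame */
};

int main(void)
{
	unsigned char stack[8192];
	unsigned long top = (unsigned long)stack + sizeof(stack);

	/* The "(struct pt_regs *)top - 1" idiom from sync_regs(). */
	struct regs_sketch *regs = (struct regs_sketch *)top - 1;

	assert((unsigned long)(regs + 1) == top);	/* flush with the top */
	assert((unsigned char *)regs >= stack);		/* inside the stack */
	return 0;
}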
Patches currently in stable-queue which might be from luto@kernel.org are
queue-4.14/x86-entry-64-simplify-reg-restore-code-in-the-standard-iret-paths.patch
queue-4.14/x86-cpufeatures-fix-various-details-in-the-feature-definitions.patch
queue-4.14/x86-entry-64-move-the-ist-stacks-into-struct-cpu_entry_area.patch
queue-4.14/x86-dumpstack-add-get_stack_info-support-for-the-sysenter-stack.patch
queue-4.14/x86-asm-don-t-use-the-confusing-.ifeq-directive.patch
queue-4.14/selftests-x86-ldt_gdt-add-infrastructure-to-test-set_thread_area.patch
queue-4.14/x86-entry-remap-the-tss-into-the-cpu-entry-area.patch
queue-4.14/x86-entry-64-paravirt-use-paravirt-safe-macro-to-access-eflags.patch
queue-4.14/x86-mm-fixmap-generalize-the-gdt-fixmap-mechanism-introduce-struct-cpu_entry_area.patch
queue-4.14/x86-paravirt-dont-patch-flush_tlb_single.patch
queue-4.14/x86-dumpstack-handle-stack-overflow-on-all-stacks.patch
queue-4.14/x86-entry-64-use-pop-instead-of-movq-in-syscall_return_via_sysret.patch
queue-4.14/x86-entry-64-return-to-userspace-from-the-trampoline-stack.patch
queue-4.14/x86-paravirt-provide-a-way-to-check-for-hypervisors.patch
queue-4.14/xen-x86-entry-64-add-xen-nmi-trap-entry.patch
queue-4.14/x86-entry-64-remove-the-restore_c_regs_and_iret-label.patch
queue-4.14/x86-entry-64-create-a-per-cpu-syscall-entry-trampoline.patch
queue-4.14/x86-entry-64-remove-the-restore_..._regs-infrastructure.patch
queue-4.14/x86-xen-64-x86-entry-64-clean-up-sp-code-in-cpu_initialize_context.patch
queue-4.14/x86-entry-add-task_top_of_stack-to-find-the-top-of-a-task-s-stack.patch
queue-4.14/x86-entry-64-remove-all-remaining-direct-thread_struct-sp0-reads.patch
queue-4.14/x86-boot-relocate-definition-of-the-initial-state-of-cr0.patch
queue-4.14/x86-entry-64-stop-initializing-tss.sp0-at-boot.patch
queue-4.14/x86-entry-64-de-xen-ify-our-nmi-code.patch
queue-4.14/objtool-don-t-report-end-of-section-error-after-an-empty-unwind-hint.patch
queue-4.14/x86-entry-64-pass-sp0-directly-to-load_sp0.patch
queue-4.14/x86-entry-64-use-a-per-cpu-trampoline-stack-for-idt-entries.patch
queue-4.14/x86-entry-64-move-swapgs-into-the-common-iret-to-usermode-path.patch
queue-4.14/x86-entry-64-shorten-test-instructions.patch
queue-4.14/x86-cpufeature-add-user-mode-instruction-prevention-definitions.patch
queue-4.14/x86-cpufeatures-make-cpu-bugs-sticky.patch
queue-4.14/x86-espfix-64-stop-assuming-that-pt_regs-is-on-the-entry-stack.patch
queue-4.14/x86-traps-use-a-new-on_thread_stack-helper-to-clean-up-an-assertion.patch
queue-4.14/x86-xen-fix-xen-head-elf-annotations.patch
queue-4.14/x86-entry-64-remove-thread_struct-sp0.patch
queue-4.14/x86-entry-move-sysenter_stack-to-the-beginning-of-struct-tss_struct.patch
queue-4.14/x86-entry-64-allocate-and-enable-the-sysenter-stack.patch
queue-4.14/x86-unwinder-orc-dont-bail-on-stack-overflow.patch
queue-4.14/x86-head-fix-head-elf-function-annotations.patch
queue-4.14/selftests-x86-ldt_gdt-run-most-existing-ldt-test-cases-against-the-gdt-as-well.patch
queue-4.14/x86-entry-32-pull-the-msr_ia32_sysenter_cs-update-code-out-of-native_load_sp0.patch
queue-4.14/x86-entry-64-split-the-iret-to-user-and-iret-to-kernel-paths.patch
queue-4.14/x86-entry-64-shrink-paranoid_exit_restore-and-make-labels-local.patch
queue-4.14/x86-head-add-unwind-hint-annotations.patch
queue-4.14/x86-head-remove-unused-bad_address-code.patch
queue-4.14/x86-xen-add-unwind-hint-annotations.patch
queue-4.14/x86-kasan-64-teach-kasan-about-the-cpu_entry_area.patch
queue-4.14/x86-head-remove-confusing-comment.patch
queue-4.14/x86-entry-64-remove-the-sysenter-stack-canary.patch
queue-4.14/x86-mm-kasan-don-t-use-vmemmap_populate-to-initialize-shadow.patch
queue-4.14/x86-entry-gdt-put-per-cpu-gdt-remaps-in-ascending-order.patch
queue-4.14/x86-entry-fix-assumptions-that-the-hw-tss-is-at-the-beginning-of-cpu_tss.patch
queue-4.14/x86-cpufeatures-re-tabulate-the-x86_feature-definitions.patch
queue-4.14/x86-entry-32-fix-cpu_current_top_of_stack-initialization-at-boot.patch
queue-4.14/ptrace-x86-make-user_64bit_mode-available-to-32-bit-builds.patch
queue-4.14/x86-entry-64-make-cpu_entry_area.tss-read-only.patch
queue-4.14/x86-mm-relocate-page-fault-error-codes-to-traps.h.patch
queue-4.14/x86-unwinder-handle-stack-overflows-more-gracefully.patch
queue-4.14/x86-entry-64-use-pop-instead-of-mov-to-restore-regs-on-nmi-return.patch
queue-4.14/x86-irq-64-print-the-offending-ip-in-the-stack-overflow-warning.patch
queue-4.14/x86-entry-clean-up-the-sysenter_stack-code.patch
queue-4.14/x86-entry-64-merge-the-fast-and-slow-sysret-paths.patch
queue-4.14/x86-entry-64-separate-cpu_current_top_of_stack-from-tss.sp0.patch
queue-4.14/x86-boot-annotate-verify_cpu-as-a-callable-function.patch
queue-4.14/x86-irq-remove-an-old-outdated-comment-about-context-tracking-races.patch