All occurrences of the scs_load macro load the value of the shadow call stack pointer from the task which is current at that point. So instead of taking a task struct register argument in the scs_load macro to specify the task struct to load from, let's always reference the current task directly. This should make it much harder to exploit any instruction sequences reloading the shadow call stack pointer register from memory.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/scs.h | 7 ++++---
 arch/arm64/kernel/entry.S    | 4 ++--
 arch/arm64/kernel/head.S     | 2 +-
 3 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index eaa2cd92e4c10122..7155055a5bebc17d 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -9,15 +9,16 @@
 #ifdef CONFIG_SHADOW_CALL_STACK
 	scs_sp	.req	x18
 
-	.macro scs_load tsk, tmp
-	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
+	.macro scs_load_current
+	get_current_task scs_sp
+	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
 	.endm
 
 	.macro scs_save tsk, tmp
 	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 #else
-	.macro scs_load tsk, tmp
+	.macro scs_load_current
 	.endm
 
 	.macro scs_save tsk, tmp
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index d5bc1dbdd2fda84c..28d4cdeee5ae6083 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -221,7 +221,7 @@ alternative_else_nop_endif
 
 	ptrauth_keys_install_kernel tsk, x20, x22, x23
 
-	scs_load tsk, x20
+	scs_load_current
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_current_task tsk
@@ -1025,7 +1025,7 @@ SYM_FUNC_START(cpu_switch_to)
 	msr	sp_el0, x1
 	ptrauth_keys_install_kernel x1, x8, x9, x10
 	scs_save x0, x8
-	scs_load x1, x8
+	scs_load_current
 	ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index e1c25fa3b8e6ca65..351ee64c7deb4c96 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -747,7 +747,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
 	ldr	x2, [x0, #CPU_BOOT_TASK]
 	cbz	x2, __secondary_too_slow
 	msr	sp_el0, x2
-	scs_load x2, x3
+	scs_load_current
 	mov	x29, #0
 	mov	x30, #0
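
For reference, a rough sketch of the instruction sequence the new scs_load_current macro should expand to, assuming get_current_task reads the current task_struct pointer from the sp_el0 system register as the arm64 entry code does elsewhere (illustration only, not part of the patch):

	// illustrative expansion of scs_load_current
	mrs	scs_sp, sp_el0				// current task_struct, taken from a system register
	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]	// reload the shadow call stack pointer

Because the base address of the load is now always derived from sp_el0 rather than from a caller-supplied general-purpose register, the sequence is far less useful as a gadget for loading an attacker-chosen shadow call stack pointer.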
commit 59b37fe52f49955791a460752c37145f1afdcad1 upstream.
Instead of reloading the shadow call stack pointer from the ordinary stack *, which may be vulnerable to the kind of gadget based attacks shadow call stacks were designed to prevent, let's store a task's shadow call stack pointer in the task struct when switching to the shadow IRQ stack.
Given that currently, the task_struct::scs_sp field is only used to preserve the shadow call stack pointer while a task is scheduled out or running in user space, reusing this field to preserve and restore it while running off the IRQ stack must be safe, as those occurrences are guaranteed to never overlap. (The stack switching logic only switches stacks when running from the task stack, and so the value being saved here always corresponds to the task mode shadow stack)
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20230109174800.3286265-3-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[ardb: v5.10 backport, which doesn't have call_on_irq_stack() yet *]
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/entry.S | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 28d4cdeee5ae6083..55e477f73158d6f8 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -431,9 +431,7 @@ SYM_CODE_END(__swpan_exit_el0)
 
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
-#ifdef CONFIG_SHADOW_CALL_STACK
-	mov	x24, scs_sp		// preserve the original shadow stack
-#endif
+	scs_save tsk			// preserve the original shadow stack
 
 	/*
 	 * Compare sp with the base of the task stack.
@@ -467,9 +465,7 @@ SYM_CODE_END(__swpan_exit_el0)
 	 */
 	.macro	irq_stack_exit
 	mov	sp, x19
-#ifdef CONFIG_SHADOW_CALL_STACK
-	mov	scs_sp, x24
-#endif
+	scs_load_current
 	.endm
 
 /* GPRs used by entry code */
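
Under the same assumptions, the net effect on the shadow stack handling around the IRQ stack switch can be sketched as follows (illustration only, using the scs_save/scs_load_current definitions from the first patch):

	// irq_stack_entry: scs_save tsk
	str	scs_sp, [tsk, #TSK_TI_SCS_SP]		// stash the task's SCS pointer in the task struct
	//	(the entry code then points scs_sp at the per-CPU IRQ shadow call stack)

	// irq_stack_exit: scs_load_current
	mrs	scs_sp, sp_el0				// current task_struct
	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]	// restore the task's SCS pointer

This removes the need to keep the saved value live in x24 across the IRQ handler, and, as the commit message argues, it is safe because the task's scs_sp slot is otherwise only live while the task is scheduled out or running in user space, so the two uses never overlap.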
On Sat, May 06, 2023 at 02:34:33PM +0200, Ard Biesheuvel wrote:
> All occurrences of the scs_load macro load the value of the shadow call stack pointer from the task which is current at that point. So instead of taking a task struct register argument in the scs_load macro to specify the task struct to load from, let's always reference the current task directly. This should make it much harder to exploit any instruction sequences reloading the shadow call stack pointer register from memory.
>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> Acked-by: Mark Rutland <mark.rutland@arm.com>
> Reviewed-by: Kees Cook <keescook@chromium.org>
> Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org
> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
>
>  arch/arm64/include/asm/scs.h | 7 ++++---
>  arch/arm64/kernel/entry.S    | 4 ++--
>  arch/arm64/kernel/head.S     | 2 +-
>  3 files changed, 7 insertions(+), 6 deletions(-)
What is the git commit id of this in Linus's tree?
thanks,
greg k-h
On Sun, 7 May 2023 at 07:11, Greg KH <gregkh@linuxfoundation.org> wrote:
> On Sat, May 06, 2023 at 02:34:33PM +0200, Ard Biesheuvel wrote:
> > All occurrences of the scs_load macro load the value of the shadow call stack pointer from the task which is current at that point. So instead of taking a task struct register argument in the scs_load macro to specify the task struct to load from, let's always reference the current task directly. This should make it much harder to exploit any instruction sequences reloading the shadow call stack pointer register from memory.
> >
> > Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> > Acked-by: Mark Rutland <mark.rutland@arm.com>
> > Reviewed-by: Kees Cook <keescook@chromium.org>
> > Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org
> > Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> > Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> >
> >  arch/arm64/include/asm/scs.h | 7 ++++---
> >  arch/arm64/kernel/entry.S    | 4 ++--
> >  arch/arm64/kernel/head.S     | 2 +-
> >  3 files changed, 7 insertions(+), 6 deletions(-)
>
> What is the git commit id of this in Linus's tree?
commit 2198d07c509f1db4a1185d1f65aaada794c6ea59 upstream.
Thanks,
On Sun, May 07, 2023 at 10:11:32AM +0200, Ard Biesheuvel wrote:
> On Sun, 7 May 2023 at 07:11, Greg KH <gregkh@linuxfoundation.org> wrote:
> > On Sat, May 06, 2023 at 02:34:33PM +0200, Ard Biesheuvel wrote:
> > > All occurrences of the scs_load macro load the value of the shadow call stack pointer from the task which is current at that point. So instead of taking a task struct register argument in the scs_load macro to specify the task struct to load from, let's always reference the current task directly. This should make it much harder to exploit any instruction sequences reloading the shadow call stack pointer register from memory.
> > >
> > > Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> > > Acked-by: Mark Rutland <mark.rutland@arm.com>
> > > Reviewed-by: Kees Cook <keescook@chromium.org>
> > > Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org
> > > Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> > > Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> > >
> > >  arch/arm64/include/asm/scs.h | 7 ++++---
> > >  arch/arm64/kernel/entry.S    | 4 ++--
> > >  arch/arm64/kernel/head.S     | 2 +-
> > >  3 files changed, 7 insertions(+), 6 deletions(-)
> >
> > What is the git commit id of this in Linus's tree?
>
> commit 2198d07c509f1db4a1185d1f65aaada794c6ea59 upstream.
Thanks, both now queued up!
greg k-h