From: James Morse <james.morse@arm.com>
commit aff65393fa1401e034656e349abd655cfe272de0 upstream.
kpti is an optional feature; for systems not using kpti, a set of vectors for the spectre-bhb mitigations is needed.
Add another set of vectors, __bp_harden_el1_vectors, that will be used if a mitigation is needed and kpti is not in use.
The EL1 ventries are repeated verbatim as there is no additional work needed for entry from EL1.
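For illustration only (not part of this patch): once a later change detects that a CPU needs the Spectre-BHB mitigation while kpti is disabled, it is expected to point VBAR_EL1 at the new table, roughly as sketched below. The install_bp_harden_el1_vectors label is made up for this sketch; only __bp_harden_el1_vectors comes from this patch.

	/* Hypothetical caller, sketched for illustration only */
	install_bp_harden_el1_vectors:
		adr_l	x0, __bp_harden_el1_vectors	// 2K-aligned vector table added below
		msr	vbar_el1, x0			// switch this CPU's exception vectors
		isb
		ret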
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/arm64/kernel/entry.S | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -973,10 +973,11 @@ alternative_else_nop_endif
 	.macro tramp_ventry, vector_start, regsize, kpti
 	.align	7
 1:
-	.if	\kpti == 1
 	.if	\regsize == 64
 	msr	tpidrro_el0, x30	// Restored in kernel_ventry
 	.endif
+
+	.if	\kpti == 1
 	/*
 	 * Defend against branch aliasing attacks by pushing a dummy
 	 * entry onto the return stack and using a RET instruction to
@@ -1060,6 +1061,38 @@ __entry_tramp_data_start:
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 /*
+ * Exception vectors for spectre mitigations on entry from EL1 when
+ * kpti is not in use.
+ */
+	.macro generate_el1_vector
+.Lvector_start\@:
+	kernel_ventry	1, sync_invalid			// Synchronous EL1t
+	kernel_ventry	1, irq_invalid			// IRQ EL1t
+	kernel_ventry	1, fiq_invalid			// FIQ EL1t
+	kernel_ventry	1, error_invalid		// Error EL1t
+
+	kernel_ventry	1, sync				// Synchronous EL1h
+	kernel_ventry	1, irq				// IRQ EL1h
+	kernel_ventry	1, fiq_invalid			// FIQ EL1h
+	kernel_ventry	1, error			// Error EL1h
+
+	.rept	4
+	tramp_ventry	.Lvector_start\@, 64, kpti=0
+	.endr
+	.rept	4
+	tramp_ventry	.Lvector_start\@, 32, kpti=0
+	.endr
+	.endm
+
+	.pushsection ".entry.text", "ax"
+	.align	11
+ENTRY(__bp_harden_el1_vectors)
+	generate_el1_vector
+END(__bp_harden_el1_vectors)
+	.popsection
+
+
+/*
  * Register switch for AArch64. The callee-saved registers need to be saved
  * and restored. On entry:
  *	x0 = previous task_struct (must be preserved across the switch)