Changes in v2:
- included missing preliminary patch to define the SB barrier instruction
Will Deacon (2):
  arm64: Add support for SB barrier and patch in over DSB; ISB sequences
  arm64: entry: Place an SB sequence following an ERET instruction
 arch/arm64/include/asm/assembler.h  | 13 +++++++++++++
 arch/arm64/include/asm/barrier.h    |  4 ++++
 arch/arm64/include/asm/cpucaps.h    |  3 ++-
 arch/arm64/include/asm/sysreg.h     |  6 ++++++
 arch/arm64/include/asm/uaccess.h    |  3 +--
 arch/arm64/include/uapi/asm/hwcap.h |  1 +
 arch/arm64/kernel/cpufeature.c      | 12 ++++++++++++
 arch/arm64/kernel/cpuinfo.c         |  1 +
 arch/arm64/kernel/entry.S           |  2 ++
 arch/arm64/kvm/hyp/entry.S          |  2 ++
 arch/arm64/kvm/hyp/hyp-entry.S      |  4 ++++
 11 files changed, 48 insertions(+), 3 deletions(-)
From: Will Deacon <will.deacon@arm.com>
commit bd4fb6d270bc423a9a4098108784f7f9254c4e6d upstream
We currently use a DSB; ISB sequence to inhibit speculation in set_fs(). Whilst this works for current CPUs, future CPUs may implement a new SB barrier instruction which acts as an architected speculation barrier.
On CPUs that support it, patch in an SB; NOP sequence over the DSB; ISB sequence and advertise the presence of the new instruction to userspace.
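Userspace can then key off the advertised hwcap to decide whether SB is
usable directly. For illustration only (not part of this series), a minimal
probe using glibc's getauxval() and the HWCAP_SB value added below:

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP_SB
	#define HWCAP_SB	(1 << 29)	/* mirrors uapi/asm/hwcap.h below */
	#endif

	int main(void)
	{
		/* AT_HWCAP carries the arm64 hwcap bits exported by the kernel */
		if (getauxval(AT_HWCAP) & HWCAP_SB)
			printf("sb: available\n");
		else
			printf("sb: not available; kernel falls back to DSB; ISB\n");
		return 0;
	}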
Signed-off-by: Will Deacon <will.deacon@arm.com>
[florian: adjust conflicts for cpucaps.h and cpufeature.c]
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
---
 arch/arm64/include/asm/assembler.h  | 13 +++++++++++++
 arch/arm64/include/asm/barrier.h    |  4 ++++
 arch/arm64/include/asm/cpucaps.h    |  3 ++-
 arch/arm64/include/asm/sysreg.h     |  6 ++++++
 arch/arm64/include/asm/uaccess.h    |  3 +--
 arch/arm64/include/uapi/asm/hwcap.h |  1 +
 arch/arm64/kernel/cpufeature.c      | 12 ++++++++++++
 arch/arm64/kernel/cpuinfo.c         |  1 +
 8 files changed, 40 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 02d73d83f0de..d98fb3f52ee3 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -104,6 +104,19 @@
 	.endm
 
 /*
+ * Speculation barrier
+ */
+	.macro	sb
+alternative_if_not ARM64_HAS_SB
+	dsb	nsh
+	isb
+alternative_else
+	SB_BARRIER_INSN
+	nop
+alternative_endif
+	.endm
+
+/*
  * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
  * of bounds.
  */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0b0755c961ac..159329160fb4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -33,6 +33,10 @@
 #define csdb()		asm volatile("hint #20" : : : "memory")
 
+#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",		\
+						 SB_BARRIER_INSN"nop\n",	\
+						 ARM64_HAS_SB))
+
 #define mb()		dsb(sy)
 #define rmb()		dsb(ld)
 #define wmb()		dsb(st)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 2f8bd0388905..cf8588560b78 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -45,7 +45,8 @@
 #define ARM64_SSBD				25
 #define ARM64_MISMATCHED_CACHE_TYPE		26
 #define ARM64_SSBS				27
+#define ARM64_HAS_SB				28
 
-#define ARM64_NCAPS				28
+#define ARM64_NCAPS				29
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 2564dd429ab6..fe6ffceaf27f 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -97,6 +97,11 @@
 #define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM |	\
 				       (!!x)<<8 | 0x1f)
 
+#define __SYS_BARRIER_INSN(CRm, op2, Rt)				\
+	__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
+
+#define SB_BARRIER_INSN			__SYS_BARRIER_INSN(0, 7, 31)
+
 #define SYS_DC_ISW			sys_insn(1, 0, 7, 6, 2)
 #define SYS_DC_CSW			sys_insn(1, 0, 7, 10, 2)
 #define SYS_DC_CISW			sys_insn(1, 0, 7, 14, 2)
@@ -398,6 +403,7 @@
 #define ID_AA64ISAR0_AES_SHIFT		4
 
 /* id_aa64isar1 */
+#define ID_AA64ISAR1_SB_SHIFT		36
 #define ID_AA64ISAR1_LRCPC_SHIFT	20
 #define ID_AA64ISAR1_FCMA_SHIFT		16
 #define ID_AA64ISAR1_JSCVT_SHIFT	12
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index fad8c1b2ca3e..b6392026e27b 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -46,8 +46,7 @@ static inline void set_fs(mm_segment_t fs)
 	 * Prevent a mispredicted conditional call to set_fs from forwarding
 	 * the wrong address limit to access_ok under speculation.
 	 */
-	dsb(nsh);
-	isb();
+	spec_bar();
 
 	/* On user-mode return, check fs is correct */
 	set_thread_flag(TIF_FSCHECK);
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 2bcd6e4f3474..7784f7cba16c 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -49,5 +49,6 @@
 #define HWCAP_ILRCPC		(1 << 26)
 #define HWCAP_FLAGM		(1 << 27)
 #define HWCAP_SSBS		(1 << 28)
+#define HWCAP_SB		(1 << 29)
 
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6b3bb67596ae..0bb0c627ec25 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -122,6 +122,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
@@ -1129,6 +1130,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_ssbs,
 	},
 #endif
+	{
+		.desc = "Speculation barrier (SB)",
+		.capability = ARM64_HAS_SB,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR1_EL1,
+		.field_pos = ID_AA64ISAR1_SB_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+	},
 	{},
 };
@@ -1183,6 +1194,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
+	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
 	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
 	{},
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 9ff64e04e63d..c45b488d2564 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
 	"ilrcpc",
 	"flagm",
 	"ssbs",
+	"sb",
 	NULL
 };
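For reference, expanding __SYS_BARRIER_INSN(0, 7, 31) with the field layout
used by the kernel's sys_insn() helper yields the architected SB opcode,
0xd50330ff. A standalone sanity check (illustrative only; sys_insn() is
re-derived here from the sysreg encoding shifts rather than taken from a
kernel header):

	#include <assert.h>
	#include <stdint.h>

	/* op0 at bit 19, op1 at 16, CRn at 12, CRm at 8, op2 at 5,
	 * matching the kernel's sys_reg()/sys_insn() field layout. */
	#define sys_insn(op0, op1, crn, crm, op2)			\
		(((op0) << 19) | ((op1) << 16) | ((crn) << 12) |	\
		 ((crm) << 8) | ((op2) << 5))

	int main(void)
	{
		uint32_t sb = 0xd5000000 | sys_insn(0, 3, 3, 0, 7) | (31 & 0x1f);

		/* SB: CRm = 0, op2 = 7, Rt = 0b11111 => 0xd50330ff */
		assert(sb == 0xd50330ffu);
		return 0;
	}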
From: Will Deacon <will.deacon@arm.com>
commit 679db70801da9fda91d26caf13bf5b5ccc74e8e8 upstream
Some CPUs can speculate past an ERET instruction and potentially perform speculative accesses to memory before processing the exception return. Since the register state is often controlled by a lower privilege level at the point of an ERET, this could potentially be used as part of a side-channel attack.
This patch emits an SB sequence after each ERET so that speculation is held up on exception return.
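For clarity, with the sb macro introduced in the previous patch, each patched
return site effectively becomes one of the two sequences below, selected at
boot via the alternatives framework (a sketch of the expansion, mirroring the
assembler.h macro):

	eret
alternative_if_not ARM64_HAS_SB
	dsb	nsh			// no SB: DSB; ISB barrier pair
	isb
alternative_else
	SB_BARRIER_INSN			// SB present: architected speculation barrier
	nop
alternative_endif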
Signed-off-by: Will Deacon <will.deacon@arm.com>
[florian: update arch/arm64/kvm/hyp/entry.S::__fpsimd_guest_restore]
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
---
 arch/arm64/kernel/entry.S      | 2 ++
 arch/arm64/kvm/hyp/entry.S     | 2 ++
 arch/arm64/kvm/hyp/hyp-entry.S | 4 ++++
 3 files changed, 8 insertions(+)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index c1ffa95c0ad2..f70e0893ba51 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -367,6 +367,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 	.else
 	eret
 	.endif
+	sb
 	.endm
 
 	.macro	irq_stack_entry
@@ -1046,6 +1047,7 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 	mrs	x30, far_el1
 	.endif
 	eret
+	sb
 	.endm
 
 	.align	11
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index a360ac6e89e9..93704e6894d2 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -83,6 +83,7 @@ ENTRY(__guest_enter)
 
 	// Do not touch any register after this!
 	eret
+	sb
ENDPROC(__guest_enter)
 
ENTRY(__guest_exit)
@@ -195,4 +196,5 @@ alternative_endif
 	ldp	x0, x1, [sp], #16
 
 	eret
+	sb
ENDPROC(__fpsimd_guest_restore)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 3c283fd8c8f5..b4d6a6c6c6ce 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -96,6 +96,7 @@ el1_sync:				// Guest trapped into EL2
 	do_el2_call
 
 	eret
+	sb
 
el1_hvc_guest:
 	/*
@@ -146,6 +147,7 @@ wa_epilogue:
 	mov	x0, xzr
 	add	sp, sp, #16
 	eret
+	sb
 
el1_trap:
 	get_vcpu_ptr	x1, x0
@@ -204,6 +206,7 @@ el2_error:
 	b.ne	__hyp_panic
 	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 	eret
+	sb
 
ENTRY(__hyp_do_panic)
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -212,6 +215,7 @@ ENTRY(__hyp_do_panic)
 	ldr	lr, =panic
 	msr	elr_el2, lr
 	eret
+	sb
ENDPROC(__hyp_do_panic)
 
ENTRY(__hyp_panic)