Changes in v2:
- included missing preliminary patch to define the SB barrier instruction
Will Deacon (2):
  arm64: Add support for SB barrier and patch in over DSB; ISB sequences
  arm64: entry: Place an SB sequence following an ERET instruction

 arch/arm64/include/asm/assembler.h  | 13 +++++++++++++
 arch/arm64/include/asm/barrier.h    |  4 ++++
 arch/arm64/include/asm/cpucaps.h    |  3 ++-
 arch/arm64/include/asm/sysreg.h     |  6 ++++++
 arch/arm64/include/asm/uaccess.h    |  3 +--
 arch/arm64/include/uapi/asm/hwcap.h |  1 +
 arch/arm64/kernel/cpufeature.c      | 12 ++++++++++++
 arch/arm64/kernel/cpuinfo.c         |  1 +
 arch/arm64/kernel/entry.S           |  2 ++
 arch/arm64/kvm/hyp/entry.S          |  1 +
 arch/arm64/kvm/hyp/hyp-entry.S      |  4 ++++
 11 files changed, 47 insertions(+), 3 deletions(-)
From: Will Deacon <will.deacon@arm.com>
commit bd4fb6d270bc423a9a4098108784f7f9254c4e6d upstream
We currently use a DSB; ISB sequence to inhibit speculation in set_fs(). Whilst this works for current CPUs, future CPUs may implement a new SB barrier instruction which acts as an architected speculation barrier.
On CPUs that support it, patch in an SB; NOP sequence over the DSB; ISB sequence and advertise the presence of the new instruction to userspace.
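[Editor's note, not part of the patch: the opcode that SB_BARRIER_INSN expands to can be sanity-checked with the small standalone C sketch below. The field shifts mirror the sys_insn()/sys_reg() definitions in sysreg.h; SB uses op0=0, op1=3, CRn=3, CRm=0, op2=7 with Rt=31 (xzr).]

#include <stdint.h>
#include <stdio.h>

/* Field shifts used by sys_insn()/sys_reg() in arch/arm64/include/asm/sysreg.h */
#define Op0_shift	19
#define Op1_shift	16
#define CRn_shift	12
#define CRm_shift	8
#define Op2_shift	5

#define sys_insn(op0, op1, crn, crm, op2)				\
	(((op0) << Op0_shift) | ((op1) << Op1_shift) |			\
	 ((crn) << CRn_shift) | ((crm) << CRm_shift) | ((op2) << Op2_shift))

int main(void)
{
	/* SB: op0=0, op1=3, CRn=3, CRm=0, op2=7, Rt=31 (xzr) */
	uint32_t sb = 0xd5000000 | sys_insn(0, 3, 3, 0, 7) | (31 & 0x1f);

	printf("SB encoding: 0x%08x\n", (unsigned int)sb);	/* prints 0xd50330ff */
	return 0;
}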
Signed-off-by: Will Deacon <will.deacon@arm.com>
[florian: adjust conflicts in cpucaps.h and cpufeature.c]
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
---
 arch/arm64/include/asm/assembler.h  | 13 +++++++++++++
 arch/arm64/include/asm/barrier.h    |  4 ++++
 arch/arm64/include/asm/cpucaps.h    |  3 ++-
 arch/arm64/include/asm/sysreg.h     |  6 ++++++
 arch/arm64/include/asm/uaccess.h    |  3 +--
 arch/arm64/include/uapi/asm/hwcap.h |  1 +
 arch/arm64/kernel/cpufeature.c      | 12 ++++++++++++
 arch/arm64/kernel/cpuinfo.c         |  1 +
 8 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5a97ac853168..45ca06f3ddcb 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -127,6 +127,19 @@
 	.endm

 /*
+ * Speculation barrier
+ */
+	.macro sb
+alternative_if_not ARM64_HAS_SB
+	dsb	nsh
+	isb
+alternative_else
+	SB_BARRIER_INSN
+	nop
+alternative_endif
+	.endm
+
+/*
  * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
  * of bounds.
  */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 822a9192c551..f66bb04fdf2d 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -34,6 +34,10 @@
 #define psb_csync()	asm volatile("hint #17" : : : "memory")
 #define csdb()		asm volatile("hint #20" : : : "memory")

+#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",	\
+				     SB_BARRIER_INSN"nop\n",		\
+				     ARM64_HAS_SB))
+
 #define mb()		dsb(sy)
 #define rmb()		dsb(ld)
 #define wmb()		dsb(st)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index df8fe8ecc37e..383451eca5c6 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -54,7 +54,8 @@
 #define ARM64_WORKAROUND_1463225	33
 #define ARM64_SSBS			34
 #define ARM64_WORKAROUND_1542419	35
+#define ARM64_HAS_SB			36

-#define ARM64_NCAPS			36
+#define ARM64_NCAPS			37

 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index ed99d941c462..582075fad6c5 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -97,6 +97,11 @@
 #define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \
 				       (!!x)<<8 | 0x1f)

+#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
+	__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
+
+#define SB_BARRIER_INSN		__SYS_BARRIER_INSN(0, 7, 31)
+
 #define SYS_DC_ISW			sys_insn(1, 0, 7, 6, 2)
 #define SYS_DC_CSW			sys_insn(1, 0, 7, 10, 2)
 #define SYS_DC_CISW			sys_insn(1, 0, 7, 14, 2)
@@ -521,6 +526,7 @@
 #define ID_AA64ISAR0_AES_SHIFT		4

 /* id_aa64isar1 */
+#define ID_AA64ISAR1_SB_SHIFT		36
 #define ID_AA64ISAR1_LRCPC_SHIFT	20
 #define ID_AA64ISAR1_FCMA_SHIFT		16
 #define ID_AA64ISAR1_JSCVT_SHIFT	12
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e66b0fca99c2..3c3bf4171f3b 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -46,8 +46,7 @@ static inline void set_fs(mm_segment_t fs)
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
-	dsb(nsh);
-	isb();
+	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 2bcd6e4f3474..7784f7cba16c 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -49,5 +49,6 @@
 #define HWCAP_ILRCPC		(1 << 26)
 #define HWCAP_FLAGM		(1 << 27)
 #define HWCAP_SSBS		(1 << 28)
+#define HWCAP_SB		(1 << 29)

 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ac3126aba036..9cc917277a82 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -138,6 +138,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 };

 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
@@ -1336,6 +1337,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_ssbs,
 	},
 #endif
+	{
+		.desc = "Speculation barrier (SB)",
+		.capability = ARM64_HAS_SB,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR1_EL1,
+		.field_pos = ID_AA64ISAR1_SB_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+	},
 	{},
 };

@@ -1390,6 +1401,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
+	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
 	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index dce971f2c167..63a49a66a28c 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -82,6 +82,7 @@ static const char *const hwcap_str[] = {
 	"ilrcpc",
 	"flagm",
 	"ssbs",
+	"sb",
 	NULL
 };
From: Will Deacon <will.deacon@arm.com>
commit 679db70801da9fda91d26caf13bf5b5ccc74e8e8 upstream
Some CPUs can speculate past an ERET instruction and potentially perform speculative accesses to memory before processing the exception return. Since the register state is often controlled by a lower privilege level at the point of an ERET, this could potentially be used as part of a side-channel attack.
This patch emits an SB sequence after each ERET so that speculation is held up on exception return.
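[Editor's note, illustration only and not part of the patch: with the sb macro from the preceding patch dropped in after ERET, an exception-return site ends up looking like the sketch below. The barrier instructions are never reached architecturally; they exist only to stop speculation past the ERET. On CPUs advertising SB, the alternative rewrites them to SB; NOP at boot.]

	eret			// return to the lower exception level
	dsb	nsh		// sb macro fallback: DSB NSH; ISB, the existing
	isb			// heavyweight way of inhibiting speculation
	// patched to "SB; NOP" when ARM64_HAS_SB is detected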
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
---
 arch/arm64/kernel/entry.S      | 2 ++
 arch/arm64/kvm/hyp/entry.S     | 1 +
 arch/arm64/kvm/hyp/hyp-entry.S | 4 ++++
 3 files changed, 7 insertions(+)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5f800384cb9a..49f80b5627fa 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -363,6 +363,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 	.else
 	eret
 	.endif
+	sb
 	.endm

 	.macro	irq_stack_entry
@@ -994,6 +995,7 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 	mrs	x30, far_el1
 	.endif
 	eret
+	sb
 	.endm

 	.align	11
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fad1e164fe48..675fdc186e3b 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -83,6 +83,7 @@ ENTRY(__guest_enter)
 	// Do not touch any register after this!
 	eret
+	sb
 ENDPROC(__guest_enter)

 ENTRY(__guest_exit)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 24b4fbafe3e4..e35abf84eb96 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -96,6 +96,7 @@ el1_sync:			// Guest trapped into EL2
 	do_el2_call

 	eret
+	sb

 el1_hvc_guest:
 	/*
@@ -146,6 +147,7 @@ wa_epilogue:
 	mov	x0, xzr
 	add	sp, sp, #16
 	eret
+	sb

 el1_trap:
 	get_vcpu_ptr	x1, x0
@@ -185,6 +187,7 @@ el2_error:
 	b.ne	__hyp_panic
 	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 	eret
+	sb

 ENTRY(__hyp_do_panic)
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -193,6 +196,7 @@ ENTRY(__hyp_do_panic)
 	ldr	lr, =panic
 	msr	elr_el2, lr
 	eret
+	sb
 ENDPROC(__hyp_do_panic)
ENTRY(__hyp_panic)
On 8/24/2020 11:35 AM, Florian Fainelli wrote:
Changes in v2:
- included missing preliminary patch to define the SB barrier instruction
Will Deacon (2):
  arm64: Add support for SB barrier and patch in over DSB; ISB sequences
  arm64: entry: Place an SB sequence following an ERET instruction
Does anybody at ARM or Android care about these changes? If so, would you be willing to review them?
Thanks
 arch/arm64/include/asm/assembler.h  | 13 +++++++++++++
 arch/arm64/include/asm/barrier.h    |  4 ++++
 arch/arm64/include/asm/cpucaps.h    |  3 ++-
 arch/arm64/include/asm/sysreg.h     |  6 ++++++
 arch/arm64/include/asm/uaccess.h    |  3 +--
 arch/arm64/include/uapi/asm/hwcap.h |  1 +
 arch/arm64/kernel/cpufeature.c      | 12 ++++++++++++
 arch/arm64/kernel/cpuinfo.c         |  1 +
 arch/arm64/kernel/entry.S           |  2 ++
 arch/arm64/kvm/hyp/entry.S          |  1 +
 arch/arm64/kvm/hyp/hyp-entry.S      |  4 ++++
 11 files changed, 47 insertions(+), 3 deletions(-)