[ Upstream commit 7187bb7d0b5c7dfa18ca82e9e5c75e13861b1d88 ]
Cortex-X4 and Neoverse-V3 suffer from errata whereby an MSR to the SSBS special-purpose register does not affect subsequent speculative instructions, permitting speculative store bypassing for a window of time. This is described in their Software Developer Errata Notice (SDEN) documents:
* Cortex-X4 SDEN v8.0, erratum 3194386: https://developer.arm.com/documentation/SDEN-2432808/0800/
* Neoverse-V3 SDEN v6.0, erratum 3312417: https://developer.arm.com/documentation/SDEN-2891958/0600/
To work around these errata, it is necessary to place a speculation barrier (SB) after an MSR to the SSBS special-purpose register. This patch adds the requisite SB after writes to SSBS within the kernel, and hides the presence of SSBS from EL0 so that userspace software which cares about SSBS will manipulate it via prctl(PR_GET_SPECULATION_CTRL, ...).
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20240508081400.235362-5-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
[ Mark: fix conflicts, drop unneeded cpucaps.h, fold in user_feature_fixup() ]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
---
 Documentation/arch/arm64/silicon-errata.rst |  4 ++
 arch/arm64/Kconfig                          | 42 +++++++++++++++++++++
 arch/arm64/kernel/cpu_errata.c              | 19 ++++++++++
 arch/arm64/kernel/cpufeature.c              | 12 ++++++
 arch/arm64/kernel/proton-pack.c             | 12 ++++++
 arch/arm64/tools/cpucaps                    |  1 +
 6 files changed, 90 insertions(+)
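A note for context, not part of the patch below: with SSBS hidden from EL0, userspace that wants speculative-store-bypass protection is expected to go through the generic speculation-control prctl() interface rather than writing PSTATE.SSBS directly. A minimal, illustrative userspace sketch of that interface (error handling trimmed; nothing here is added by this patch):

/* Illustrative userspace usage of the speculation-control prctl(). */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Query the per-task speculative store bypass state. */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");
		return 1;
	}

	/* If per-task control is available, disable speculative store bypass. */
	if ((state & PR_SPEC_PRCTL) &&
	    prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	return 0;
}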
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index 29fd5213eeb2b..f8e49ff9ab0d4 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -133,6 +133,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X4       | #3194386        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
@@ -145,6 +147,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N2     | #2253138        | ARM64_ERRATUM_2253138       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-V3     | #3312417        | ARM64_ERRATUM_3312417       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-600         | #1076982,1209401| N/A                         |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f9777ce2ccb2d..db5560300df84 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1068,6 +1068,48 @@ config ARM64_ERRATUM_3117295
 
 	  If unsure, say Y.
 
+config ARM64_WORKAROUND_SPECULATIVE_SSBS
+	bool
+
+config ARM64_ERRATUM_3194386
+	bool "Cortex-X4: 3194386: workaround for MSR SSBS not self-synchronizing"
+	select ARM64_WORKAROUND_SPECULATIVE_SSBS
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-X4 erratum 3194386.
+
+	  On affected cores "MSR SSBS, #0" instructions may not affect
+	  subsequent speculative instructions, which may permit unexpected
+	  speculative store bypassing.
+
+	  Work around this problem by placing a speculation barrier after
+	  kernel changes to SSBS. The presence of the SSBS special-purpose
+	  register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
+	  that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
+	  SSBS.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_3312417
+	bool "Neoverse-V3: 3312417: workaround for MSR SSBS not self-synchronizing"
+	select ARM64_WORKAROUND_SPECULATIVE_SSBS
+	default y
+	help
+	  This option adds the workaround for ARM Neoverse-V3 erratum 3312417.
+
+	  On affected cores "MSR SSBS, #0" instructions may not affect
+	  subsequent speculative instructions, which may permit unexpected
+	  speculative store bypassing.
+
+	  Work around this problem by placing a speculation barrier after
+	  kernel changes to SSBS. The presence of the SSBS special-purpose
+	  register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
+	  that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
+	  SSBS.
+
+	  If unsure, say Y.
+
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 7bba831f62c33..3e1554a58209b 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -448,6 +448,18 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {
 };
 #endif
 
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS
+static const struct midr_range erratum_spec_ssbs_list[] = {
+#ifdef CONFIG_ARM64_ERRATUM_3194386
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_3312417
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
+#endif
+	{}
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
 	{
@@ -746,6 +758,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.cpu_enable = cpu_clear_bf16_from_user_emulation,
 	},
 #endif
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS
+	{
+		.desc = "ARM errata 3194386, 3312417",
+		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
+		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
+	},
+#endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
 	{
 		.desc = "ARM errata 2966298, 3117295",
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 444a73c2e6385..7e96604559004 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2190,6 +2190,17 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_MTE */
 
+static void user_feature_fixup(void)
+{
+	if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) {
+		struct arm64_ftr_reg *regp;
+
+		regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1);
+		if (regp)
+			regp->user_mask &= ~ID_AA64PFR1_EL1_SSBS_MASK;
+	}
+}
+
 static void elf_hwcap_fixup(void)
 {
 #ifdef CONFIG_ARM64_ERRATUM_1742098
@@ -3345,6 +3356,7 @@ void __init setup_cpu_features(void)
 	u32 cwg;
 
 	setup_system_capabilities();
+	user_feature_fixup();
 	setup_elf_hwcaps(arm64_elf_hwcaps);
 
 	if (system_supports_32bit_el0()) {
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 05f40c4e18fda..e662c4d2856b6 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -558,6 +558,18 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
 
 	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
 	set_pstate_ssbs(0);
+
+	/*
+	 * SSBS is self-synchronizing and is intended to affect subsequent
+	 * speculative instructions, but some CPUs can speculate with a stale
+	 * value of SSBS.
+	 *
+	 * Mitigate this with an unconditional speculation barrier, as CPUs
+	 * could mis-speculate branches and bypass a conditional barrier.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS))
+		spec_bar();
+
 	return SPECTRE_MITIGATED;
 }
 
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 5511bee15603a..c251ef3caae56 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -99,4 +99,5 @@ WORKAROUND_NVIDIA_CARMEL_CNP
 WORKAROUND_QCOM_FALKOR_E1003
 WORKAROUND_REPEAT_TLBI
 WORKAROUND_SPECULATIVE_AT
+WORKAROUND_SPECULATIVE_SSBS
 WORKAROUND_SPECULATIVE_UNPRIV_LOAD
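
A closing note, not part of the patch: at the instruction level the proton-pack.c change boils down to the MSR-then-SB sequence described in the commit message. The open-coded sketch below is illustrative only; the kernel uses its existing set_pstate_ssbs()/spec_bar() helpers, and building this standalone assumes a toolchain that accepts the SSBS and SB extensions (e.g. -march=armv8.5-a). The function name is made up for illustration.

/*
 * Illustrative only: disallow speculative store bypass and ensure the new
 * SSBS value is honoured by subsequent speculation on affected cores.
 */
static inline void disallow_store_bypass(void)
{
	/* Clear PSTATE.SSBS via the MSR (immediate) form. */
	asm volatile("msr SSBS, #0" ::: "memory");

	/*
	 * Speculation barrier: on the affected cores the MSR alone may not
	 * affect in-flight speculation, so force later instructions to
	 * observe the updated SSBS value.
	 */
	asm volatile("sb" ::: "memory");
}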