This is a note to let you know that I've just added the patch titled
[Variant 3/Meltdown] arm64: mm: Allocate ASIDs in pairs
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-mm-allocate-asids-in-pairs.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.

From foo@baz Tue Feb 13 17:25:10 CET 2018
From: Will Deacon <will.deacon@arm.com>
Date: Thu, 10 Aug 2017 14:10:28 +0100
Subject: [Variant 3/Meltdown] arm64: mm: Allocate ASIDs in pairs
From: Will Deacon <will.deacon@arm.com>
Commit 0c8ea531b774 upstream.
In preparation for separate kernel/user ASIDs, allocate them in pairs
for each mm_struct. The bottom bit distinguishes the two: if it is set,
then the ASID will map only userspace.
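As a rough stand-alone illustration of the pairing arithmetic in the hunk
below (not kernel code; the fixed ASID_BITS value is an assumption here,
whereas the kernel probes the real width from the CPU at boot):

#include <stdio.h>

#define ASID_BITS	16
#define ASID_MASK	(~((1UL << ASID_BITS) - 1))
#define NUM_USER_ASIDS	(1UL << (ASID_BITS - 1))

static unsigned long asid2idx(unsigned long asid)
{
	return (asid & ~ASID_MASK) >> 1;
}

static unsigned long idx2asid(unsigned long idx)
{
	return (idx << 1) & ~ASID_MASK;
}

int main(void)
{
	/* Each allocator slot now stands for a kernel/user pair of ASIDs */
	for (unsigned long idx = 1; idx <= 3; idx++) {
		unsigned long kernel_asid = idx2asid(idx);	/* even */
		unsigned long user_asid = kernel_asid | 1;	/* odd */

		printf("idx %lu -> kernel ASID %lu, user ASID %lu, idx again %lu\n",
		       idx, kernel_asid, user_asid, asid2idx(user_asid));
	}
	return 0;
}

Each allocator index thus covers an even kernel ASID and the odd user ASID
next to it, so both halves of a pair are allocated and flushed together.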
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/include/asm/mmu.h | 1 +
arch/arm64/mm/context.c | 25 +++++++++++++++++--------
2 files changed, 18 insertions(+), 8 deletions(-)
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -17,6 +17,7 @@
#define __ASM_MMU_H
#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
+#define USER_ASID_FLAG (UL(1) << 48)
typedef struct {
atomic64_t id;
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
-#define NUM_USER_ASIDS ASID_FIRST_VERSION
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
+#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
+#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
+#else
+#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
+#define asid2idx(asid) ((asid) & ~ASID_MASK)
+#define idx2asid(idx) asid2idx(idx)
+#endif
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
@@ -98,7 +107,7 @@ static void flush_context(unsigned int c
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
- __set_bit(asid & ~ASID_MASK, asid_map);
+ __set_bit(asid2idx(asid), asid_map);
per_cpu(reserved_asids, i) = asid;
}
@@ -153,16 +162,16 @@ static u64 new_context(struct mm_struct
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
- asid &= ~ASID_MASK;
- if (!__test_and_set_bit(asid, asid_map))
+ if (!__test_and_set_bit(asid2idx(asid), asid_map))
return newasid;
}
/*
* Allocate a free ASID. If we can't find one, take a note of the
- * currently active ASIDs and mark the TLBs as requiring flushes.
- * We always count from ASID #1, as we use ASID #0 when setting a
- * reserved TTBR0 for the init_mm.
+ * currently active ASIDs and mark the TLBs as requiring flushes. We
+ * always count from ASID #2 (index 1), as we use ASID #0 when setting
+ * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+ * pairs.
*/
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
if (asid != NUM_USER_ASIDS)
@@ -179,7 +188,7 @@ static u64 new_context(struct mm_struct
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
- return asid | generation;
+ return idx2asid(asid) | generation;
}
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)

Patches currently in stable-queue which might be from will.deacon@arm.com are
queue-4.15/arm64-make-user_ds-an-inclusive-limit.patch
queue-4.15/arm64-mm-remove-pre_ttbr0_update_workaround-for-falkor-erratum-e1003.patch
queue-4.15/arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch
queue-4.15/arm64-cpufeature-pass-capability-structure-to-enable-callback.patch
queue-4.15/arm64-uaccess-mask-__user-pointers-for-__arch_-clear-copy_-_user.patch
queue-4.15/arm64-mm-add-arm64_kernel_unmapped_at_el0-helper.patch
queue-4.15/arm64-entry-reword-comment-about-post_ttbr_update_workaround.patch
queue-4.15/arm64-kaslr-put-kernel-vectors-address-in-separate-data-page.patch
queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.15/arm64-move-bp-hardening-to-check_and_switch_context.patch
queue-4.15/arm-arm64-kvm-advertise-smccc-v1.1.patch
queue-4.15/arm64-move-post_ttbr_update_workaround-to-c-code.patch
queue-4.15/arm64-turn-on-kpti-only-on-cpus-that-need-it.patch
queue-4.15/firmware-psci-expose-psci-conduit.patch
queue-4.15/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
queue-4.15/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
queue-4.15/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.15/arm64-mm-permit-transitioning-from-global-to-non-global-without-bbm.patch
queue-4.15/arm64-mm-allocate-asids-in-pairs.patch
queue-4.15/arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch
queue-4.15/arm64-use-ret-instruction-for-exiting-the-trampoline.patch
queue-4.15/arm64-futex-mask-__user-pointers-prior-to-dereference.patch
queue-4.15/arm64-entry-explicitly-pass-exception-level-to-kernel_ventry-macro.patch
queue-4.15/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
queue-4.15/arm64-kpti-make-use-of-ng-dependent-on-arm64_kernel_unmapped_at_el0.patch
queue-4.15/arm-arm64-kvm-add-psci_version-helper.patch
queue-4.15/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
queue-4.15/arm64-entry-ensure-branch-through-syscall-table-is-bounded-under-speculation.patch
queue-4.15/arm64-mm-use-non-global-mappings-for-kernel-space.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
queue-4.15/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.15/arm64-entry-hook-up-entry-trampoline-to-exception-vectors.patch
queue-4.15/arm64-branch-predictor-hardening-for-cavium-thunderx2.patch
queue-4.15/arm64-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch
queue-4.15/arm64-use-pointer-masking-to-limit-uaccess-speculation.patch
queue-4.15/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm64-erratum-work-around-falkor-erratum-e1003-in-trampoline-code.patch
queue-4.15/arm64-mm-fix-and-re-enable-arm64_sw_ttbr0_pan.patch
queue-4.15/arm64-mm-invalidate-both-kernel-and-user-asids-when-performing-tlbi.patch
queue-4.15/drivers-firmware-expose-psci_get_version-through-psci_ops-structure.patch
queue-4.15/arm64-mm-rename-post_ttbr0_update_workaround.patch
queue-4.15/arm64-mm-map-entry-trampoline-into-trampoline-and-kernel-page-tables.patch
queue-4.15/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
queue-4.15/arm-arm64-kvm-implement-psci-1.0-support.patch
queue-4.15/arm64-kconfig-reword-unmap_kernel_at_el0-kconfig-entry.patch
queue-4.15/arm64-mm-move-asid-from-ttbr0-to-ttbr1.patch
queue-4.15/arm64-mm-introduce-ttbr_asid_mask-for-getting-at-the-asid-in-the-ttbr.patch
queue-4.15/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
queue-4.15/arm64-take-into-account-id_aa64pfr0_el1.csv3.patch
queue-4.15/arm64-cputype-add-missing-midr-values-for-cortex-a72-and-cortex-a75.patch
queue-4.15/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm64-barrier-add-csdb-macros-to-control-data-value-prediction.patch
queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.15/perf-arm_spe-fail-device-probe-when-arm64_kernel_unmapped_at_el0.patch
queue-4.15/arm64-implement-branch-predictor-hardening-for-falkor.patch
queue-4.15/arm64-kconfig-add-config_unmap_kernel_at_el0.patch
queue-4.15/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
queue-4.15/arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch
queue-4.15/arm64-mm-temporarily-disable-arm64_sw_ttbr0_pan.patch
queue-4.15/arm64-entry-add-exception-trampoline-page-for-exceptions-from-el0.patch
queue-4.15/arm64-kvm-make-psci_version-a-fast-path.patch
queue-4.15/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
queue-4.15/arm64-implement-array_index_mask_nospec.patch
queue-4.15/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
queue-4.15/arm-arm64-kvm-consolidate-the-psci-include-files.patch
queue-4.15/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
queue-4.15/arm64-cpu_errata-add-kryo-to-falkor-1003-errata.patch
queue-4.15/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
queue-4.15/arm64-entry-add-fake-cpu-feature-for-unmapping-the-kernel-at-el0.patch
queue-4.15/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
queue-4.15/arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch
queue-4.15/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch

This is a note to let you know that I've just added the patch titled
[Variant 1/Spectre-v1] arm64: Make USER_DS an inclusive limit
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-make-user_ds-an-inclusive-limit.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.

From foo@baz Tue Feb 13 17:25:10 CET 2018
From: Robin Murphy <robin.murphy@arm.com>
Date: Mon, 5 Feb 2018 15:34:18 +0000
Subject: [Variant 1/Spectre-v1] arm64: Make USER_DS an inclusive limit
From: Robin Murphy <robin.murphy@arm.com>
Commit 51369e398d0d upstream.
Currently, USER_DS represents an exclusive limit while KERNEL_DS is
inclusive. In order to do some clever trickery for speculation-safe
masking, we need them both to behave equivalently - there aren't enough
bits to make KERNEL_DS exclusive, so we have precisely one option. This
also happens to correct a longstanding false negative for a range
ending on the very top byte of kernel memory.
Mark Rutland points out that we've actually got the semantics of
addresses vs. segments muddled up in most of the places we need to
amend, so shuffle the {USER,KERNEL}_DS definitions around such that we
can correct those properly instead of just pasting "-1"s everywhere.
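For reference, the new inclusive-limit test can be modelled in a few lines
of C (an illustration only, leaning on a compiler-provided 128-bit type;
the kernel's __range_ok() below computes the same result with condition
flags instead):

#include <stdbool.h>
#include <stdint.h>

/* Valid iff addr + size <= limit + 1, evaluated without 64-bit overflow */
static bool range_ok(uint64_t addr, uint64_t size, uint64_t limit)
{
	return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
}

With USER_DS redefined as TASK_SIZE_64 - 1, a range ending exactly at the
top of user memory still passes, and a KERNEL_DS range ending on the last
byte of the address space no longer fails spuriously.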
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/include/asm/processor.h | 3 ++
arch/arm64/include/asm/uaccess.h | 45 +++++++++++++++++++++----------------
arch/arm64/kernel/entry.S | 4 +--
arch/arm64/mm/fault.c | 4 +--
4 files changed, 33 insertions(+), 23 deletions(-)
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -21,6 +21,9 @@
#define TASK_SIZE_64 (UL(1) << VA_BITS)
+#define KERNEL_DS UL(-1)
+#define USER_DS (TASK_SIZE_64 - 1)
+
#ifndef __ASSEMBLY__
/*
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -35,10 +35,7 @@
#include <asm/compiler.h>
#include <asm/extable.h>
-#define KERNEL_DS (-1UL)
#define get_ds() (KERNEL_DS)
-
-#define USER_DS TASK_SIZE_64
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
@@ -66,22 +63,32 @@ static inline void set_fs(mm_segment_t f
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
*/
-#define __range_ok(addr, size) \
-({ \
- unsigned long __addr = (unsigned long)(addr); \
- unsigned long flag, roksum; \
- __chk_user_ptr(addr); \
- asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
- : "=&r" (flag), "=&r" (roksum) \
- : "1" (__addr), "Ir" (size), \
- "r" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; \
-})
+static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+{
+ unsigned long limit = current_thread_info()->addr_limit;
+
+ __chk_user_ptr(addr);
+ asm volatile(
+ // A + B <= C + 1 for all A,B,C, in four easy steps:
+ // 1: X = A + B; X' = X % 2^64
+ " adds %0, %0, %2\n"
+ // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+ " csel %1, xzr, %1, hi\n"
+ // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+ // to compensate for the carry flag being set in step 4. For
+ // X > 2^64, X' merely has to remain nonzero, which it does.
+ " csinv %0, %0, xzr, cc\n"
+ // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+ // comes from the carry in being clear. Otherwise, we are
+ // testing X' - C == 0, subject to the previous adjustments.
+ " sbcs xzr, %0, %1\n"
+ " cset %0, ls\n"
+ : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+
+ return addr;
+}
/*
* When dealing with data aborts, watchpoints, or instruction traps we may end
@@ -90,7 +97,7 @@ static inline void set_fs(mm_segment_t f
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
-#define access_ok(type, addr, size) __range_ok(addr, size)
+#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
#define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -167,10 +167,10 @@ alternative_else_nop_endif
.else
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
- /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ /* Save the task's original addr_limit and set USER_DS */
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
- mov x20, #TASK_SIZE_64
+ mov x20, #USER_DS
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -240,7 +240,7 @@ static inline bool is_permission_fault(u
if (fsc_type == ESR_ELx_FSC_PERM)
return true;
- if (addr < USER_DS && system_uses_ttbr0_pan())
+ if (addr < TASK_SIZE && system_uses_ttbr0_pan())
return fsc_type == ESR_ELx_FSC_FAULT &&
(regs->pstate & PSR_PAN_BIT);
@@ -414,7 +414,7 @@ static int __kprobes do_page_fault(unsig
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
+ if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);

Patches currently in stable-queue which might be from robin.murphy@arm.com are
queue-4.15/arm64-make-user_ds-an-inclusive-limit.patch
queue-4.15/arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch
queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.15/firmware-psci-expose-psci-conduit.patch
queue-4.15/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.15/arm64-use-pointer-masking-to-limit-uaccess-speculation.patch
queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.15/arm64-implement-array_index_mask_nospec.patch

This is a note to let you know that I've just added the patch titled
[Variant 2/Spectre-v2] arm64: KVM: Make PSCI_VERSION a fast path
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-kvm-make-psci_version-a-fast-path.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.

From foo@baz Tue Feb 13 17:25:10 CET 2018
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Wed, 3 Jan 2018 16:38:37 +0000
Subject: [Variant 2/Spectre-v2] arm64: KVM: Make PSCI_VERSION a fast path
From: Marc Zyngier <marc.zyngier@arm.com>
Commit 90348689d500 upstream.
For those CPUs that require PSCI to perform a BP invalidation,
going all the way to the PSCI code for not much is a waste of
precious cycles. Let's terminate that call as early as possible.
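For context, the guest-side call being accelerated is a plain PSCI_VERSION
query over the HVC conduit; a minimal sketch, assuming the SMCCC v1.1
helpers added elsewhere in this queue:

#include <linux/arm-smccc.h>
#include <uapi/linux/psci.h>

static unsigned long guest_psci_version(void)
{
	struct arm_smccc_res res;

	/* Now answered directly in hyp by the fast path below */
	arm_smccc_1_1_hvc(PSCI_0_2_FN_PSCI_VERSION, &res);
	return res.a0;
}

Guests that use PSCI_VERSION as their branch-predictor-invalidation vehicle
issue this call very frequently, hence the fast path.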
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/kvm/hyp/switch.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+#include <uapi/linux/psci.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
@@ -341,6 +342,18 @@ again:
if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
goto again;
+ if (exit_code == ARM_EXCEPTION_TRAP &&
+ (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
+ vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
+ u64 val = PSCI_RET_NOT_SUPPORTED;
+ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+ val = 2;
+
+ vcpu_set_reg(vcpu, 0, val);
+ goto again;
+ }
+
if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
exit_code == ARM_EXCEPTION_TRAP) {
bool valid;

Patches currently in stable-queue which might be from marc.zyngier@arm.com are
queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.15/arm64-move-bp-hardening-to-check_and_switch_context.patch
queue-4.15/arm-arm64-kvm-advertise-smccc-v1.1.patch
queue-4.15/arm64-move-post_ttbr_update_workaround-to-c-code.patch
queue-4.15/firmware-psci-expose-psci-conduit.patch
queue-4.15/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
queue-4.15/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
queue-4.15/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.15/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
queue-4.15/arm-arm64-kvm-add-psci_version-helper.patch
queue-4.15/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
queue-4.15/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.15/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
queue-4.15/arm-arm64-kvm-implement-psci-1.0-support.patch
queue-4.15/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
queue-4.15/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.15/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
queue-4.15/arm64-kvm-make-psci_version-a-fast-path.patch
queue-4.15/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
queue-4.15/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
queue-4.15/arm-arm64-kvm-consolidate-the-psci-include-files.patch
queue-4.15/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
queue-4.15/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
queue-4.15/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
queue-4.15/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch

This is a note to let you know that I've just added the patch titled
[Variant 2/Spectre-v2] arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.

From foo@baz Tue Feb 13 17:25:10 CET 2018
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Tue, 6 Feb 2018 17:56:14 +0000
Subject: [Variant 2/Spectre-v2] arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support
From: Marc Zyngier <marc.zyngier@arm.com>
Commit 6167ec5c9145 upstream.
A new feature of SMCCC 1.1 is that it offers firmware-based CPU
workarounds. In particular, SMCCC_ARCH_WORKAROUND_1 provides
BP hardening for CVE-2017-5715.
If the host has some mitigation for this issue, report that
we deal with it using SMCCC_ARCH_WORKAROUND_1, as we apply the
host workaround on every guest exit.
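A guest can discover this as sketched below; a minimal example assuming an
HVC conduit and the SMCCC v1.1 primitives from the companion patches in
this queue:

#include <linux/arm-smccc.h>
#include <linux/psci.h>

static bool fw_has_bp_hardening(void)
{
	struct arm_smccc_res res;

	/* ARCH_FEATURES only exists from SMCCC v1.1 onwards */
	if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
		return false;

	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	/* KVM returns 0 here once the host applies the workaround */
	return (int)res.a0 >= 0;
}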
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Conflicts:
arch/arm/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_host.h
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm/include/asm/kvm_host.h | 6 ++++++
arch/arm64/include/asm/kvm_host.h | 5 +++++
include/linux/arm-smccc.h | 5 +++++
virt/kvm/arm/psci.c | 9 ++++++++-
4 files changed, 24 insertions(+), 1 deletion(-)
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -301,4 +301,10 @@ int kvm_arm_vcpu_arch_has_attr(struct kv
/* All host FP/SIMD state is restored on guest exit, so nothing to save: */
static inline void kvm_fpsimd_flush_cpu_state(void) {}
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+ /* No way to detect it yet, pretend it is not there. */
+ return false;
+}
+
#endif /* __ARM_KVM_HOST_H__ */
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -396,4 +396,9 @@ static inline void kvm_fpsimd_flush_cpu_
sve_flush_cpu_state();
}
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+}
+
#endif /* __ARM64_KVM_HOST_H__ */
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -73,6 +73,11 @@
ARM_SMCCC_SMC_32, \
0, 1)
+#define ARM_SMCCC_ARCH_WORKAROUND_1 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -405,13 +405,20 @@ int kvm_hvc_call_handler(struct kvm_vcpu
{
u32 func_id = smccc_get_function(vcpu);
u32 val = PSCI_RET_NOT_SUPPORTED;
+ u32 feature;
switch (func_id) {
case ARM_SMCCC_VERSION_FUNC_ID:
val = ARM_SMCCC_VERSION_1_1;
break;
case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
- /* Nothing supported yet */
+ feature = smccc_get_arg1(vcpu);
+ switch(feature) {
+ case ARM_SMCCC_ARCH_WORKAROUND_1:
+ if (kvm_arm_harden_branch_predictor())
+ val = 0;
+ break;
+ }
break;
default:
return kvm_psci_call(vcpu);

Patches currently in stable-queue which might be from marc.zyngier@arm.com are
queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.15/arm64-move-bp-hardening-to-check_and_switch_context.patch
queue-4.15/arm-arm64-kvm-advertise-smccc-v1.1.patch
queue-4.15/arm64-move-post_ttbr_update_workaround-to-c-code.patch
queue-4.15/firmware-psci-expose-psci-conduit.patch
queue-4.15/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
queue-4.15/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
queue-4.15/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.15/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
queue-4.15/arm-arm64-kvm-add-psci_version-helper.patch
queue-4.15/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
queue-4.15/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.15/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
queue-4.15/arm-arm64-kvm-implement-psci-1.0-support.patch
queue-4.15/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
queue-4.15/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.15/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
queue-4.15/arm64-kvm-make-psci_version-a-fast-path.patch
queue-4.15/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
queue-4.15/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
queue-4.15/arm-arm64-kvm-consolidate-the-psci-include-files.patch
queue-4.15/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
queue-4.15/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
queue-4.15/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
queue-4.15/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch

This is a note to let you know that I've just added the patch titled
[Variant 2/Spectre-v2] arm64: KVM: Increment PC after handling an SMC trap
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-kvm-increment-pc-after-handling-an-smc-trap.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.

From foo@baz Tue Feb 13 17:25:10 CET 2018
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Tue, 6 Feb 2018 17:56:07 +0000
Subject: [Variant 2/Spectre-v2] arm64: KVM: Increment PC after handling an SMC trap
From: Marc Zyngier <marc.zyngier@arm.com>
Commit f5115e8869e1 upstream.
When handling an SMC trap, the "preferred return address" is set
to that of the SMC, and not the next PC (which is a departure from
the behaviour of an SMC that isn't trapped).
Increment PC in the handler, as the guest is otherwise forever
stuck...
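What kvm_skip_instr() does can be modelled roughly as follows (a
simplified sketch; the real helper also advances AArch32 IT state, which
is omitted here):

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

static void skip_trapped_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_pc(vcpu) += is_wide_instr ? 4 : 2;	/* ARM vs Thumb */
	else
		*vcpu_pc(vcpu) += 4;	/* AArch64 instructions are 4 bytes */
}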
Cc: stable@vger.kernel.org
Fixes: acfb3b883f6d ("arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls")
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/kvm/handle_exit.c | 9 +++++++++
1 file changed, 9 insertions(+)
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -54,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *v
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
+ /*
+ * "If an SMC instruction executed at Non-secure EL1 is
+ * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
+ * Trap exception, not a Secure Monitor Call exception [...]"
+ *
+ * We need to advance the PC after the trap, as it would
+ * otherwise return to the same address...
+ */
vcpu_set_reg(vcpu, 0, ~0UL);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}

Patches currently in stable-queue which might be from marc.zyngier@arm.com are
queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.15/arm64-move-bp-hardening-to-check_and_switch_context.patch
queue-4.15/arm-arm64-kvm-advertise-smccc-v1.1.patch
queue-4.15/arm64-move-post_ttbr_update_workaround-to-c-code.patch
queue-4.15/firmware-psci-expose-psci-conduit.patch
queue-4.15/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
queue-4.15/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
queue-4.15/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.15/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
queue-4.15/arm-arm64-kvm-add-psci_version-helper.patch
queue-4.15/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
queue-4.15/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.15/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
queue-4.15/arm-arm64-kvm-implement-psci-1.0-support.patch
queue-4.15/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
queue-4.15/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.15/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
queue-4.15/arm64-kvm-make-psci_version-a-fast-path.patch
queue-4.15/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
queue-4.15/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
queue-4.15/arm-arm64-kvm-consolidate-the-psci-include-files.patch
queue-4.15/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
queue-4.15/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
queue-4.15/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
queue-4.15/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch

This is a note to let you know that I've just added the patch titled
[Variant 2/Spectre-v2] arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.

From foo@baz Tue Feb 13 17:25:10 CET 2018
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Tue, 6 Feb 2018 17:56:15 +0000
Subject: [Variant 2/Spectre-v2] arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
From: Marc Zyngier <marc.zyngier@arm.com>
Commit f72af90c3783 upstream.
We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible.
So let's intercept it as early as we can by testing for the
function call number as soon as we've identified a HVC call
coming from the guest.
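In C terms, the assembly below boils down to something like this sketch
(illustrative only; the real code operates on the guest's saved x0 on the
hyp stack):

#include <linux/arm-smccc.h>

static bool fast_handle_workaround_1(unsigned long *guest_x0)
{
	if (*guest_x0 != ARM_SMCCC_ARCH_WORKAROUND_1)
		return false;		/* take the normal el1_trap path */

	/* The host vectors already hardened the BP on this very exit */
	*guest_x0 = 0;			/* SMCCC success */
	return true;			/* eret straight back to the guest */
}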
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/kvm/hyp/hyp-entry.S | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/alternative.h>
@@ -64,10 +65,11 @@ alternative_endif
lsr x0, x1, #ESR_ELx_EC_SHIFT
cmp x0, #ESR_ELx_EC_HVC64
+ ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
b.ne el1_trap
- mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest
- cbnz x1, el1_trap // called HVC
+ mrs x1, vttbr_el2 // If vttbr is valid, the guest
+ cbnz x1, el1_hvc_guest // called HVC
/* Here, we're pretty sure the host called HVC. */
ldp x0, x1, [sp], #16
@@ -100,6 +102,20 @@ alternative_endif
eret
+el1_hvc_guest:
+ /*
+ * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+ * The workaround has already been applied on the host,
+ * so let's quickly get back to the guest. We don't bother
+ * restoring x1, as it can be clobbered anyway.
+ */
+ ldr x1, [sp] // Guest's x0
+ eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+ cbnz w1, el1_trap
+ mov x0, x1
+ add sp, sp, #16
+ eret
+
el1_trap:
/*
* x0: ESR_EC

Patches currently in stable-queue which might be from marc.zyngier@arm.com are
queue-4.15/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.15/arm64-move-bp-hardening-to-check_and_switch_context.patch
queue-4.15/arm-arm64-kvm-advertise-smccc-v1.1.patch
queue-4.15/arm64-move-post_ttbr_update_workaround-to-c-code.patch
queue-4.15/firmware-psci-expose-psci-conduit.patch
queue-4.15/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
queue-4.15/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
queue-4.15/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.15/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
queue-4.15/arm-arm64-kvm-add-psci_version-helper.patch
queue-4.15/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
queue-4.15/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
queue-4.15/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.15/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
queue-4.15/arm-arm64-kvm-implement-psci-1.0-support.patch
queue-4.15/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
queue-4.15/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.15/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.15/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
queue-4.15/arm64-kvm-make-psci_version-a-fast-path.patch
queue-4.15/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
queue-4.15/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
queue-4.15/arm-arm64-kvm-consolidate-the-psci-include-files.patch
queue-4.15/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
queue-4.15/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
queue-4.15/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
queue-4.15/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch