This is a note to let you know that I've just added the patch titled
x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
to the 4.9-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-retpoline-add-lfence-to-the-retpoline-rsb-filling-rsb-macros.patch
and it can be found in the queue-4.9 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 28d437d550e1e39f805d99f9f8ac399c778827b7 Mon Sep 17 00:00:00 2001
From: Tom Lendacky <thomas.lendacky(a)amd.com>
Date: Sat, 13 Jan 2018 17:27:30 -0600
Subject: x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
From: Tom Lendacky <thomas.lendacky(a)amd.com>
commit 28d437d550e1e39f805d99f9f8ac399c778827b7 upstream.
The PAUSE instruction is currently used in the retpoline and RSB filling
macros as a speculation trap. The use of PAUSE was originally suggested
because it showed a very, very small difference in the amount of
cycles/time used to execute the retpoline as compared to LFENCE. On AMD,
the PAUSE instruction is not a serializing instruction, so the pause/jmp
loop will use excess power as it is speculated over while waiting for the
return to mispredict to the correct target.
The RSB filling macro is applicable to AMD, and, if software is unable to
verify that LFENCE is serializing on AMD (possible when running under a
hypervisor), the generic retpoline support will be used and so is also
applicable to AMD. Keep the current usage of PAUSE for Intel, but add an
LFENCE instruction to the speculation trap for AMD.
The same sequence has been adopted by GCC for the GCC generated retpolines.
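For reference, the resulting generic retpoline sequence for an indirect
branch through a register reads roughly as below (label names and the
register choice are illustrative; the real macros in nospec-branch.h are
parameterised, as the hunks below show):

        call    .Ldo_rop
.Lspec_trap:
        pause                   /* keeps the trap low-power on Intel */
        lfence                  /* stops speculation here on AMD as well */
        jmp     .Lspec_trap     /* captured speculation spins here */
.Ldo_rop:
        mov     %rax, (%rsp)    /* overwrite the return address with the target */
        ret                     /* 'return' to the real branch target */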
Signed-off-by: Tom Lendacky <thomas.lendacky(a)amd.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Borislav Petkov <bp(a)alien8.de>
Acked-by: David Woodhouse <dwmw(a)amazon.co.uk>
Acked-by: Arjan van de Ven <arjan(a)linux.intel.com>
Cc: Rik van Riel <riel(a)redhat.com>
Cc: Andi Kleen <ak(a)linux.intel.com>
Cc: Paul Turner <pjt(a)google.com>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Jiri Kosina <jikos(a)kernel.org>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Dan Williams <dan.j.williams(a)intel.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh(a)linux-foundation.org>
Cc: Kees Cook <keescook(a)google.com>
Link: https://lkml.kernel.org/r/20180113232730.31060.36287.stgit@tlendack-t1.amdo…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/include/asm/nospec-branch.h | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -11,7 +11,7 @@
* Fill the CPU return stack buffer.
*
* Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; jmp' loop to capture speculative execution.
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
*
* This is required in various cases for retpoline and IBRS-based
* mitigations for the Spectre variant 2 vulnerability. Sometimes to
@@ -38,11 +38,13 @@
call 772f; \
773: /* speculation trap */ \
pause; \
+ lfence; \
jmp 773b; \
772: \
call 774f; \
775: /* speculation trap */ \
pause; \
+ lfence; \
jmp 775b; \
774: \
dec reg; \
@@ -73,6 +75,7 @@
call .Ldo_rop_\@
.Lspec_trap_\@:
pause
+ lfence
jmp .Lspec_trap_\@
.Ldo_rop_\@:
mov \reg, (%_ASM_SP)
@@ -165,6 +168,7 @@
" .align 16\n" \
"901: call 903f;\n" \
"902: pause;\n" \
+ " lfence;\n" \
" jmp 902b;\n" \
" .align 16\n" \
"903: addl $4, %%esp;\n" \
Patches currently in stable-queue which might be from thomas.lendacky(a)amd.com are
queue-4.9/x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch
queue-4.9/x86-retpoline-add-lfence-to-the-retpoline-rsb-filling-rsb-macros.patch
This is a note to let you know that I've just added the patch titled
x86/cpufeature: Move processor tracing out of scattered features
to the 4.9-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-cpufeature-move-processor-tracing-out-of-scattered-features.patch
and it can be found in the queue-4.9 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 4fdec2034b7540dda461c6ba33325dfcff345c64 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <pbonzini(a)redhat.com>
Date: Tue, 16 Jan 2018 16:42:25 +0100
Subject: x86/cpufeature: Move processor tracing out of scattered features
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
From: Paolo Bonzini <pbonzini(a)redhat.com>
commit 4fdec2034b7540dda461c6ba33325dfcff345c64 upstream.
Processor tracing is already enumerated in word 9 (CPUID[7,0].EBX),
so do not duplicate it in the scattered features word.
Besides being more tidy, this will be useful for KVM when it presents
processor tracing to the guests. KVM selects host features that are
supported by both the host kernel (depending on command line options,
CPU errata, or whatever) and KVM. Whenever a full feature word exists,
KVM's code is written in the expectation that the CPUID bit number
matches the X86_FEATURE_* bit number, but this is not the case for
X86_FEATURE_INTEL_PT.
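As a side note, a minimal sketch (illustrative, not part of the patch) of
what the alignment buys: with X86_FEATURE_INTEL_PT in word 9, bit 25, a
direct test of CPUID(7,0).EBX bit 25 refers to the same bit as the kernel
feature flag, with no translation table in between:

#include <asm/processor.h>      /* cpuid_count() */
#include <linux/printk.h>

static void show_intel_pt(void)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

        /* CPUID[7,0].EBX[25] and X86_FEATURE_INTEL_PT (9*32+25) now name
         * the same bit position within their word.
         */
        if (ebx & (1U << 25))
                pr_info("Intel Processor Trace enumerated\n");
}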
Signed-off-by: Paolo Bonzini <pbonzini(a)redhat.com>
Cc: Borislav Petkov <bp(a)suse.de>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Luwei Kang <luwei.kang(a)intel.com>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Radim Krčmář <rkrcmar(a)redhat.com>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: kvm(a)vger.kernel.org
Link: http://lkml.kernel.org/r/1516117345-34561-1-git-send-email-pbonzini@redhat.…
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/include/asm/cpufeatures.h | 2 +-
arch/x86/kernel/cpu/scattered.c | 1 -
2 files changed, 1 insertion(+), 2 deletions(-)
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -197,7 +197,6 @@
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
@@ -236,6 +235,7 @@
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,7 +31,6 @@ void init_scattered_cpuid_features(struc
const struct cpuid_bit *cb;
static const struct cpuid_bit cpuid_bits[] = {
- { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
{ X86_FEATURE_AVX512_4VNNIW, CR_EDX, 2, 0x00000007, 0 },
{ X86_FEATURE_AVX512_4FMAPS, CR_EDX, 3, 0x00000007, 0 },
{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
Patches currently in stable-queue which might be from pbonzini(a)redhat.com are
queue-4.9/x86-cpufeature-move-processor-tracing-out-of-scattered-features.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pkeys: Fix fill_sig_info_pkey
to the 4.9-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pkeys-fix-fill_sig_info_pkey.patch
and it can be found in the queue-4.9 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From beacd6f7ed5e2915959442245b3b2480c2e37490 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman" <ebiederm(a)xmission.com>
Date: Fri, 12 Jan 2018 14:31:35 -0600
Subject: x86/mm/pkeys: Fix fill_sig_info_pkey
From: Eric W. Biederman <ebiederm(a)xmission.com>
commit beacd6f7ed5e2915959442245b3b2480c2e37490 upstream.
SEGV_PKUERR is a signal-specific si_code which happens to have the same
numeric value as several others: BUS_MCEERR_AR, ILL_ILLTRP, FPE_FLTOVF,
TRAP_HWBKPT, CLD_TRAPPED, POLL_ERR, SEGV_THREAD_ID. As such it is not safe
to test only the si_code; the signal number must also be tested to prevent
a false positive in fill_sig_info_pkey.
This error was found by inspection, and BUS_MCEERR_AR appears to be a real
candidate for confusion. So pass in si_signo and check for SIGSEGV to
verify that it is actually a SEGV_PKUERR.
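For illustration only (the helper below is hypothetical and assumes a libc
that defines SEGV_PKUERR and BUS_MCEERR_AR), the collision means only the
(si_signo, si_code) pair is unambiguous, which is exactly the check the
patch adds:

#include <signal.h>
#include <stdbool.h>

/* Hypothetical helper mirroring the fixed check: SEGV_PKUERR and
 * BUS_MCEERR_AR share a numeric value, so the signal number has to be
 * part of the test before trusting the pkey information.
 */
static bool is_pkey_fault(int si_signo, int si_code)
{
        return si_signo == SIGSEGV && si_code == SEGV_PKUERR;
}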
Fixes: 019132ff3daf ("x86/mm/pkeys: Fill in pkey field in siginfo")
Signed-off-by: "Eric W. Biederman" <ebiederm(a)xmission.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: linux-arch(a)vger.kernel.org
Cc: Dave Hansen <dave.hansen(a)linux.intel.com>
Cc: Oleg Nesterov <oleg(a)redhat.com>
Cc: Al Viro <viro(a)zeniv.linux.org.uk>
Link: https://lkml.kernel.org/r/20180112203135.4669-2-ebiederm@xmission.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/mm/fault.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -191,14 +191,15 @@ is_prefetch(struct pt_regs *regs, unsign
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
-static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
+static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
+ u32 *pkey)
{
/* This is effectively an #ifdef */
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return;
/* Fault not from Protection Keys: nothing to do */
- if (si_code != SEGV_PKUERR)
+ if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
return;
/*
* force_sig_info_fault() is called from a number of
@@ -237,7 +238,7 @@ force_sig_info_fault(int si_signo, int s
lsb = PAGE_SHIFT;
info.si_addr_lsb = lsb;
- fill_sig_info_pkey(si_code, &info, pkey);
+ fill_sig_info_pkey(si_signo, si_code, &info, pkey);
force_sig_info(si_signo, &info, tsk);
}
Patches currently in stable-queue which might be from ebiederm(a)xmission.com are
queue-4.9/x86-mm-pkeys-fix-fill_sig_info_pkey.patch
This is a note to let you know that I've just added the patch titled
objtool: Improve error message for bad file argument
to the 4.9-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
objtool-improve-error-message-for-bad-file-argument.patch
and it can be found in the queue-4.9 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 385d11b152c4eb638eeb769edcb3249533bb9a00 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf <jpoimboe(a)redhat.com>
Date: Mon, 15 Jan 2018 08:17:08 -0600
Subject: objtool: Improve error message for bad file argument
From: Josh Poimboeuf <jpoimboe(a)redhat.com>
commit 385d11b152c4eb638eeb769edcb3249533bb9a00 upstream.
If a nonexistent file is supplied to objtool, it complains with a
non-helpful error:
open: No such file or directory
Improve it to:
objtool: Can't open 'foo': No such file or directory
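The pattern is the usual strerror(errno) reporting; a stand-alone sketch of
the same idea (function name hypothetical, not objtool's code) looks like
this:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

static int open_or_complain(const char *name)
{
        int fd = open(name, O_RDONLY);

        if (fd == -1)
                fprintf(stderr, "objtool: Can't open '%s': %s\n",
                        name, strerror(errno));
        return fd;
}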
Reported-by: Markus <M4rkusXXL(a)web.de>
Signed-off-by: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Link: http://lkml.kernel.org/r/406a3d00a21225eee2819844048e17f68523ccf6.151602565…
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
tools/objtool/elf.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -26,6 +26,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <errno.h>
#include "elf.h"
#include "warn.h"
@@ -370,7 +371,8 @@ struct elf *elf_open(const char *name)
elf->fd = open(name, O_RDONLY);
if (elf->fd == -1) {
- perror("open");
+ fprintf(stderr, "objtool: Can't open '%s': %s\n",
+ name, strerror(errno));
goto err;
}
Patches currently in stable-queue which might be from jpoimboe(a)redhat.com are
queue-4.9/objtool-improve-error-message-for-bad-file-argument.patch
queue-4.9/x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch
queue-4.9/x86-retpoline-add-lfence-to-the-retpoline-rsb-filling-rsb-macros.patch
This is a note to let you know that I've just added the patch titled
module: Add retpoline tag to VERMAGIC
to the 4.9-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
module-add-retpoline-tag-to-vermagic.patch
and it can be found in the queue-4.9 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 6cfb521ac0d5b97470883ff9b7facae264b7ab12 Mon Sep 17 00:00:00 2001
From: Andi Kleen <ak(a)linux.intel.com>
Date: Tue, 16 Jan 2018 12:52:28 -0800
Subject: module: Add retpoline tag to VERMAGIC
From: Andi Kleen <ak(a)linux.intel.com>
commit 6cfb521ac0d5b97470883ff9b7facae264b7ab12 upstream.
Add a marker for retpoline to the module VERMAGIC. This catches the case
when a module compiled without RETPOLINE gets loaded into a retpoline
kernel, making it insecure.
It doesn't handle the case when retpoline has been disabled at run time;
even then the retpoline compile status is still enforced. This implies that
even with retpoline disabled at run time, all loaded modules need to be
recompiled.
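For illustration (the version string below is made up, and this is not the
kernel's actual loader code), the effect is that the kernel's and the
module's vermagic strings no longer match when only one of them carries the
"retpoline " token:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *kernel_vermagic = "4.9.77 SMP mod_unload retpoline ";
        const char *module_vermagic = "4.9.77 SMP mod_unload ";

        printf("vermagic match: %s\n",
               strcmp(kernel_vermagic, module_vermagic) == 0 ? "yes" : "no");
        return 0;
}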
Signed-off-by: Andi Kleen <ak(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Acked-by: David Woodhouse <dwmw(a)amazon.co.uk>
Cc: rusty(a)rustcorp.com.au
Cc: arjan.van.de.ven(a)intel.com
Cc: jeyu(a)kernel.org
Cc: torvalds(a)linux-foundation.org
Link: https://lkml.kernel.org/r/20180116205228.4890-1-andi@firstfloor.org
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
include/linux/vermagic.h | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -24,10 +24,16 @@
#ifndef MODULE_ARCH_VERMAGIC
#define MODULE_ARCH_VERMAGIC ""
#endif
+#ifdef RETPOLINE
+#define MODULE_VERMAGIC_RETPOLINE "retpoline "
+#else
+#define MODULE_VERMAGIC_RETPOLINE ""
+#endif
#define VERMAGIC_STRING \
UTS_RELEASE " " \
MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
- MODULE_ARCH_VERMAGIC
+ MODULE_ARCH_VERMAGIC \
+ MODULE_VERMAGIC_RETPOLINE
Patches currently in stable-queue which might be from ak(a)linux.intel.com are
queue-4.9/module-add-retpoline-tag-to-vermagic.patch
queue-4.9/x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch
queue-4.9/x86-retpoline-add-lfence-to-the-retpoline-rsb-filling-rsb-macros.patch
This is a note to let you know that I've just added the patch titled
x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
to the 4.4-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-retpoline-add-lfence-to-the-retpoline-rsb-filling-rsb-macros.patch
and it can be found in the queue-4.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 28d437d550e1e39f805d99f9f8ac399c778827b7 Mon Sep 17 00:00:00 2001
From: Tom Lendacky <thomas.lendacky(a)amd.com>
Date: Sat, 13 Jan 2018 17:27:30 -0600
Subject: x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
From: Tom Lendacky <thomas.lendacky(a)amd.com>
commit 28d437d550e1e39f805d99f9f8ac399c778827b7 upstream.
The PAUSE instruction is currently used in the retpoline and RSB filling
macros as a speculation trap. The use of PAUSE was originally suggested
because it showed a very, very small difference in the amount of
cycles/time used to execute the retpoline as compared to LFENCE. On AMD,
the PAUSE instruction is not a serializing instruction, so the pause/jmp
loop will use excess power as it is speculated over while waiting for the
return to mispredict to the correct target.
The RSB filling macro is applicable to AMD, and, if software is unable to
verify that LFENCE is serializing on AMD (possible when running under a
hypervisor), the generic retpoline support will be used and so is also
applicable to AMD. Keep the current usage of PAUSE for Intel, but add an
LFENCE instruction to the speculation trap for AMD.
The same sequence has been adopted by GCC for the GCC generated retpolines.
Signed-off-by: Tom Lendacky <thomas.lendacky(a)amd.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Borislav Petkov <bp(a)alien8.de>
Acked-by: David Woodhouse <dwmw(a)amazon.co.uk>
Acked-by: Arjan van de Ven <arjan(a)linux.intel.com>
Cc: Rik van Riel <riel(a)redhat.com>
Cc: Andi Kleen <ak(a)linux.intel.com>
Cc: Paul Turner <pjt(a)google.com>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Jiri Kosina <jikos(a)kernel.org>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Dan Williams <dan.j.williams(a)intel.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh(a)linux-foundation.org>
Cc: Kees Cook <keescook(a)google.com>
Link: https://lkml.kernel.org/r/20180113232730.31060.36287.stgit@tlendack-t1.amdo…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/include/asm/nospec-branch.h | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -11,7 +11,7 @@
* Fill the CPU return stack buffer.
*
* Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; jmp' loop to capture speculative execution.
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
*
* This is required in various cases for retpoline and IBRS-based
* mitigations for the Spectre variant 2 vulnerability. Sometimes to
@@ -38,11 +38,13 @@
call 772f; \
773: /* speculation trap */ \
pause; \
+ lfence; \
jmp 773b; \
772: \
call 774f; \
775: /* speculation trap */ \
pause; \
+ lfence; \
jmp 775b; \
774: \
dec reg; \
@@ -60,6 +62,7 @@
call .Ldo_rop_\@
.Lspec_trap_\@:
pause
+ lfence
jmp .Lspec_trap_\@
.Ldo_rop_\@:
mov \reg, (%_ASM_SP)
@@ -142,6 +145,7 @@
" .align 16\n" \
"901: call 903f;\n" \
"902: pause;\n" \
+ " lfence;\n" \
" jmp 902b;\n" \
" .align 16\n" \
"903: addl $4, %%esp;\n" \
Patches currently in stable-queue which might be from thomas.lendacky(a)amd.com are
queue-4.4/x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch
queue-4.4/x86-retpoline-irq32-convert-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-hyperv-convert-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-entry-convert-entry-assembler-indirect-jumps.patch
queue-4.4/x86-cpu-amd-make-lfence-a-serializing-instruction.patch
queue-4.4/x86-retpoline-ftrace-convert-ftrace-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-crypto-convert-crypto-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-xen-convert-xen-hypercall-indirect-jumps.patch
queue-4.4/x86-retpoline-checksum32-convert-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-fill-return-stack-buffer-on-vmexit.patch
queue-4.4/x86-retpoline-add-lfence-to-the-retpoline-rsb-filling-rsb-macros.patch
queue-4.4/x86-retpoline-remove-compile-time-warning.patch
queue-4.4/x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch
queue-4.4/x86-retpoline-add-initial-retpoline-support.patch
This is a note to let you know that I've just added the patch titled
module: Add retpoline tag to VERMAGIC
to the 4.4-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
module-add-retpoline-tag-to-vermagic.patch
and it can be found in the queue-4.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 6cfb521ac0d5b97470883ff9b7facae264b7ab12 Mon Sep 17 00:00:00 2001
From: Andi Kleen <ak(a)linux.intel.com>
Date: Tue, 16 Jan 2018 12:52:28 -0800
Subject: module: Add retpoline tag to VERMAGIC
From: Andi Kleen <ak(a)linux.intel.com>
commit 6cfb521ac0d5b97470883ff9b7facae264b7ab12 upstream.
Add a marker for retpoline to the module VERMAGIC. This catches the case
when a module compiled without RETPOLINE gets loaded into a retpoline
kernel, making it insecure.
It doesn't handle the case when retpoline has been disabled at run time;
even then the retpoline compile status is still enforced. This implies that
even with retpoline disabled at run time, all loaded modules need to be
recompiled.
Signed-off-by: Andi Kleen <ak(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Acked-by: David Woodhouse <dwmw(a)amazon.co.uk>
Cc: rusty(a)rustcorp.com.au
Cc: arjan.van.de.ven(a)intel.com
Cc: jeyu(a)kernel.org
Cc: torvalds(a)linux-foundation.org
Link: https://lkml.kernel.org/r/20180116205228.4890-1-andi@firstfloor.org
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
include/linux/vermagic.h | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -24,10 +24,16 @@
#ifndef MODULE_ARCH_VERMAGIC
#define MODULE_ARCH_VERMAGIC ""
#endif
+#ifdef RETPOLINE
+#define MODULE_VERMAGIC_RETPOLINE "retpoline "
+#else
+#define MODULE_VERMAGIC_RETPOLINE ""
+#endif
#define VERMAGIC_STRING \
UTS_RELEASE " " \
MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
- MODULE_ARCH_VERMAGIC
+ MODULE_ARCH_VERMAGIC \
+ MODULE_VERMAGIC_RETPOLINE
Patches currently in stable-queue which might be from ak(a)linux.intel.com are
queue-4.4/x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch
queue-4.4/x86-retpoline-irq32-convert-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-hyperv-convert-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-entry-convert-entry-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-ftrace-convert-ftrace-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-crypto-convert-crypto-assembler-indirect-jumps.patch
queue-4.4/module-add-retpoline-tag-to-vermagic.patch
queue-4.4/x86-retpoline-xen-convert-xen-hypercall-indirect-jumps.patch
queue-4.4/x86-retpoline-checksum32-convert-assembler-indirect-jumps.patch
queue-4.4/x86-retpoline-fill-return-stack-buffer-on-vmexit.patch
queue-4.4/x86-retpoline-add-lfence-to-the-retpoline-rsb-filling-rsb-macros.patch
queue-4.4/x86-retpoline-remove-compile-time-warning.patch
queue-4.4/x86-retpoline-add-initial-retpoline-support.patch
This is a note to let you know that I've just added the patch titled
x86/tsc: Future-proof native_calibrate_tsc()
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-tsc-future-proof-native_calibrate_tsc.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From da4ae6c4a0b8dee5a5377a385545d2250fa8cddb Mon Sep 17 00:00:00 2001
From: Len Brown <len.brown(a)intel.com>
Date: Fri, 22 Dec 2017 00:27:54 -0500
Subject: x86/tsc: Future-proof native_calibrate_tsc()
From: Len Brown <len.brown(a)intel.com>
commit da4ae6c4a0b8dee5a5377a385545d2250fa8cddb upstream.
If the crystal frequency cannot be determined via CPUID(15).crystal_khz or
the built-in table then native_calibrate_tsc() will still set the
X86_FEATURE_TSC_KNOWN_FREQ flag which prevents the refined TSC calibration.
As a consequence such systems use cpu_khz for the TSC frequency which is
incorrect when cpu_khz != tsc_khz resulting in time drift.
Return early when the crystal frequency cannot be retrieved without setting
the X86_FEATURE_TSC_KNOWN_FREQ flag. This ensures that the refined TSC
calibration is invoked.
[ tglx: Steam-blastered changelog. Sigh ]
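A condensed sketch of the intended flow (names are illustrative, not the
exact kernel code): when the crystal frequency is unknown the function now
bails out with 0, the caller falls back to other calibration methods, and
the refined TSC calibration later corrects the value:

static unsigned long calibrate_from_cpuid_15h(unsigned int crystal_khz,
                                              unsigned int ebx_numerator,
                                              unsigned int eax_denominator)
{
        if (crystal_khz == 0)
                return 0;       /* unknown crystal: let the caller fall back */

        /* TSC frequency in kHz, derived from CPUID leaf 0x15 */
        return crystal_khz * ebx_numerator / eax_denominator;
}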
Fixes: 4ca4df0b7eb0 ("x86/tsc: Mark TSC frequency determined by CPUID as known")
Signed-off-by: Len Brown <len.brown(a)intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: peterz(a)infradead.org
Cc: Bin Gao <bin.gao(a)intel.com>
Link: https://lkml.kernel.org/r/0fe2503aa7d7fc69137141fc705541a78101d2b9.15139204…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/kernel/tsc.c | 2 ++
1 file changed, 2 insertions(+)
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -612,6 +612,8 @@ unsigned long native_calibrate_tsc(void)
}
}
+ if (crystal_khz == 0)
+ return 0;
/*
* TSC frequency determined by CPUID is a "hardware reported"
* frequency and is the most accurate one so far we have. This
Patches currently in stable-queue which might be from len.brown(a)intel.com are
queue-4.14/x86-tsc-fix-erroneous-tsc-rate-on-skylake-xeon.patch
queue-4.14/x86-tsc-future-proof-native_calibrate_tsc.patch
This is a note to let you know that I've just added the patch titled
x86/tsc: Fix erroneous TSC rate on Skylake Xeon
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-tsc-fix-erroneous-tsc-rate-on-skylake-xeon.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From b511203093489eb1829cb4de86e8214752205ac6 Mon Sep 17 00:00:00 2001
From: Len Brown <len.brown(a)intel.com>
Date: Fri, 22 Dec 2017 00:27:55 -0500
Subject: x86/tsc: Fix erroneous TSC rate on Skylake Xeon
From: Len Brown <len.brown(a)intel.com>
commit b511203093489eb1829cb4de86e8214752205ac6 upstream.
The INTEL_FAM6_SKYLAKE_X hardcoded crystal_khz value of 25MHZ is
problematic:
- SKX workstations (with same model # as server variants) use a 24 MHz
crystal. This results in a -4.0% time drift rate on SKX workstations.
- SKX servers subject the crystal to an EMI reduction circuit that reduces its
actual frequency by approximately -0.25%. This results in roughly -1 second
of time drift per 10 minutes as compared to network time.
This issue can also trigger a timer and power problem, on configurations
that use the LAPIC timer (versus the TSC deadline timer). Clock ticks
scheduled with the LAPIC timer arrive a few usec before the time they are
expected (according to the slow TSC). This causes Linux to poll-idle, when
it should be in an idle power saving state. The idle and clock code do not
gracefully recover from this error, sometimes resulting in significant
polling and measurable power impact.
Stop using native_calibrate_tsc() for INTEL_FAM6_SKYLAKE_X.
native_calibrate_tsc() will return 0, boot will run with tsc_khz = cpu_khz,
and the TSC refined calibration will update tsc_khz to correct for the
difference.
[ tglx: Sanitized change log ]
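Back-of-the-envelope check of the workstation case (assumed values matching
the description above: 25 MHz hardcoded, 24 MHz actual crystal):

#include <stdio.h>

int main(void)
{
        double assumed_khz = 25000.0;   /* hardcoded crystal_khz */
        double actual_khz  = 24000.0;   /* SKX workstation crystal */

        /* prints "time drift: -4.0%" */
        printf("time drift: %.1f%%\n",
               100.0 * (actual_khz - assumed_khz) / assumed_khz);
        return 0;
}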
Fixes: 6baf3d61821f ("x86/tsc: Add additional Intel CPU models to the crystal quirk list")
Signed-off-by: Len Brown <len.brown(a)intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: peterz(a)infradead.org
Cc: Prarit Bhargava <prarit(a)redhat.com>
Link: https://lkml.kernel.org/r/ff6dcea166e8ff8f2f6a03c17beab2cb436aa779.15139204…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/kernel/tsc.c | 1 -
1 file changed, 1 deletion(-)
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -602,7 +602,6 @@ unsigned long native_calibrate_tsc(void)
case INTEL_FAM6_KABYLAKE_DESKTOP:
crystal_khz = 24000; /* 24.0 MHz */
break;
- case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_ATOM_DENVERTON:
crystal_khz = 25000; /* 25.0 MHz */
break;
Patches currently in stable-queue which might be from len.brown(a)intel.com are
queue-4.14/x86-tsc-fix-erroneous-tsc-rate-on-skylake-xeon.patch
queue-4.14/x86-tsc-future-proof-native_calibrate_tsc.patch
This is a note to let you know that I've just added the patch titled
x86/retpoline: Fill RSB on context switch for affected CPUs
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From c995efd5a740d9cbafbf58bde4973e8b50b4d761 Mon Sep 17 00:00:00 2001
From: David Woodhouse <dwmw(a)amazon.co.uk>
Date: Fri, 12 Jan 2018 17:49:25 +0000
Subject: x86/retpoline: Fill RSB on context switch for affected CPUs
From: David Woodhouse <dwmw(a)amazon.co.uk>
commit c995efd5a740d9cbafbf58bde4973e8b50b4d761 upstream.
On context switch from a shallow call stack to a deeper one, as the CPU
does 'ret' up the deeper side it may encounter RSB entries (predictions for
where the 'ret' goes to) which were populated in userspace.
This is problematic if neither SMEP nor KPTI (the latter of which marks
userspace pages as NX for the kernel) are active, as malicious code in
userspace may then be executed speculatively.
Overwrite the CPU's return prediction stack with calls which are predicted
to return to an infinite loop, to "capture" speculation if this
happens. This is required both for retpoline, and also in conjunction with
IBRS for !SMEP && !KPTI.
On Skylake+ the problem is slightly different, and an *underflow* of the
RSB may cause errant branch predictions to occur. So there it's not so much
overwrite, as *filling* the RSB to attempt to prevent it getting
empty. This is only a partial solution for Skylake+ since there are many
other conditions which may result in the RSB becoming empty. The full
solution on Skylake+ is to use IBRS, which will prevent the problem even
when the RSB becomes empty. With IBRS, the RSB-stuffing will not be
required on context switch.
[ tglx: Added missing vendor check and slightly massaged comments and
changelog ]
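Condensed, the condition the patch adds in bugs.c (see the hunk below) boils
down to the following paraphrase; the helper is only a restatement for
readability, not a separate implementation:

/* Paraphrase of the new check in spectre_v2_select_mitigation() */
static bool need_rsb_fill_on_ctxsw(void)
{
        bool unprotected_userspace = !boot_cpu_has(X86_FEATURE_PTI) &&
                                     !boot_cpu_has(X86_FEATURE_SMEP);

        return unprotected_userspace || is_skylake_era();
}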
Signed-off-by: David Woodhouse <dwmw(a)amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Acked-by: Arjan van de Ven <arjan(a)linux.intel.com>
Cc: gnomes(a)lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel(a)redhat.com>
Cc: Andi Kleen <ak(a)linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: thomas.lendacky(a)amd.com
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Jiri Kosina <jikos(a)kernel.org>
Cc: Andy Lutomirski <luto(a)amacapital.net>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Kees Cook <keescook(a)google.com>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh(a)linux-foundation.org>
Cc: Paul Turner <pjt(a)google.com>
Link: https://lkml.kernel.org/r/1515779365-9032-1-git-send-email-dwmw@amazon.co.uk
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/entry/entry_32.S | 11 +++++++++++
arch/x86/entry/entry_64.S | 11 +++++++++++
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++++++++++++++++++++
4 files changed, 59 insertions(+)
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -244,6 +244,17 @@ ENTRY(__switch_to_asm)
movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif
+#ifdef CONFIG_RETPOLINE
+ /*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
/* restore callee-saved registers */
popl %esi
popl %edi
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -487,6 +487,17 @@ ENTRY(__switch_to_asm)
movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif
+#ifdef CONFIG_RETPOLINE
+ /*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
/* restore callee-saved registers */
popq %r15
popq %r14
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -211,6 +211,7 @@
#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -23,6 +23,7 @@
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
+#include <asm/intel-family.h>
static void __init spectre_v2_select_mitigation(void);
@@ -155,6 +156,23 @@ disable:
return SPECTRE_V2_CMD_NONE;
}
+/* Check for Skylake-like CPUs (for RSB handling) */
+static bool __init is_skylake_era(void)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6) {
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
+ return true;
+ }
+ }
+ return false;
+}
+
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -213,6 +231,24 @@ retpoline_auto:
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
+
+ /*
+ * If neither SMEP or KPTI are available, there is a risk of
+ * hitting userspace addresses in the RSB after a context switch
+ * from a shallow call stack to a deeper one. To prevent this fill
+ * the entire RSB, even when using IBRS.
+ *
+ * Skylake era CPUs have a separate issue with *underflow* of the
+ * RSB, when they will predict 'ret' targets from the generic BTB.
+ * The proper mitigation for this is IBRS. If IBRS is not supported
+ * or deactivated in favour of retpolines the RSB fill on context
+ * switch is required.
+ */
+ if ((!boot_cpu_has(X86_FEATURE_PTI) &&
+ !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Filling RSB on context switch\n");
+ }
}
#undef pr_fmt
Patches currently in stable-queue which might be from dwmw(a)amazon.co.uk are
queue-4.14/module-add-retpoline-tag-to-vermagic.patch
queue-4.14/x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch
queue-4.14/x86-retpoline-add-lfence-to-the-retpoline-rsb-filling-rsb-macros.patch