This is a note to let you know that I've just added the patch titled
x86/mm: Use/Fix PCID to optimize user/kernel switches
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 6fd166aae78c0ab738d49bda653cbd9e3b1491cf Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz(a)infradead.org>
Date: Mon, 4 Dec 2017 15:07:59 +0100
Subject: x86/mm: Use/Fix PCID to optimize user/kernel switches
From: Peter Zijlstra <peterz(a)infradead.org>
commit 6fd166aae78c0ab738d49bda653cbd9e3b1491cf upstream.
We can use PCID to retain the TLBs across CR3 switches, including those now
part of the user/kernel switch. This increases the performance of kernel
entry/exit at the cost of more expensive/complicated TLB flushing.
Now that we have two address spaces, one for the kernel and one for user
space, we need two PCIDs per mm. We use the top PCID bit to indicate a user
PCID (just like we use the PFN LSB for the PGD). Since we do TLB invalidation
from kernel space, the existing code will only invalidate the kernel PCID;
we augment that by marking the corresponding user PCID invalid and, upon
switching back to userspace, using a flushing CR3 write for the switch.
In order to access the user_pcid_flush_mask we use PER_CPU storage, which
means the previously established SWAPGS vs CR3 ordering is now mandatory.
Having to do this memory access requires additional registers. Most sites
have a functioning stack and we can spill one (RAX); sites without a
functional stack need to provide the second scratch register some other way.
Note: PCID is generally available on Intel Sandybridge and later CPUs.
Note: Up until this point TLB flushing was broken in this series.
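As a rough illustration, the kernel/user PCID pairing introduced here looks
like this in C (a sketch only: kern_pcid() below is the real helper, while a
user_pcid() counterpart only appears in the follow-up "clarify the whole
ASID/kernel PCID/user PCID naming" patch in this queue; the sketch_ prefix
marks these as illustrative, not tree code):

	/* Sketch: the two hardware PCIDs backing one mm's ASID */
	static inline u16 sketch_kern_pcid(u16 asid)
	{
		return asid + 1;	/* PCID 0 is reserved for non-PCID use */
	}

	static inline u16 sketch_user_pcid(u16 asid)
	{
		/* Same PCID with the user/kernel switch bit (bit 11) set */
		return sketch_kern_pcid(asid) | (1 << X86_CR3_PTI_SWITCH_BIT);
	}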
Based-on-code-from: Dave Hansen <dave.hansen(a)linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz(a)infradead.org>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: Dave Hansen <dave.hansen(a)linux.intel.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/entry/calling.h | 72 ++++++++++++++++++----
arch/x86/entry/entry_64.S | 9 +-
arch/x86/entry/entry_64_compat.S | 4 -
arch/x86/include/asm/processor-flags.h | 5 +
arch/x86/include/asm/tlbflush.h | 91 ++++++++++++++++++++++++----
arch/x86/include/uapi/asm/processor-flags.h | 7 +-
arch/x86/kernel/asm-offsets.c | 4 +
arch/x86/mm/init.c | 2 +-
arch/x86/mm/tlb.c | 1 +
9 files changed, 162 insertions(+), 33 deletions(-)
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -3,6 +3,9 @@
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
+#include <asm/percpu.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor-flags.h>
/*
@@ -191,17 +194,21 @@ For 32-bit we have the following convent
#ifdef CONFIG_PAGE_TABLE_ISOLATION
-/* PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two halves: */
-#define PTI_SWITCH_MASK (1<<PAGE_SHIFT)
+/*
+ * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two
+ * halves:
+ */
+#define PTI_SWITCH_PGTABLES_MASK (1<<PAGE_SHIFT)
+#define PTI_SWITCH_MASK (PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT))
-.macro ADJUST_KERNEL_CR3 reg:req
- /* Clear "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
- andq $(~PTI_SWITCH_MASK), \reg
+.macro SET_NOFLUSH_BIT reg:req
+ bts $X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm
-.macro ADJUST_USER_CR3 reg:req
- /* Move CR3 up a page to the user page tables: */
- orq $(PTI_SWITCH_MASK), \reg
+.macro ADJUST_KERNEL_CR3 reg:req
+ ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
+ /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
+ andq $(~PTI_SWITCH_MASK), \reg
.endm
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
@@ -212,21 +219,58 @@ For 32-bit we have the following convent
.Lend_\@:
.endm
-.macro SWITCH_TO_USER_CR3 scratch_reg:req
+#define THIS_CPU_user_pcid_flush_mask \
+ PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
+
+.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
mov %cr3, \scratch_reg
- ADJUST_USER_CR3 \scratch_reg
+
+ ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
+
+ /*
+ * Test if the ASID needs a flush.
+ */
+ movq \scratch_reg, \scratch_reg2
+ andq $(0x7FF), \scratch_reg /* mask ASID */
+ bt \scratch_reg, THIS_CPU_user_pcid_flush_mask
+ jnc .Lnoflush_\@
+
+ /* Flush needed, clear the bit */
+ btr \scratch_reg, THIS_CPU_user_pcid_flush_mask
+ movq \scratch_reg2, \scratch_reg
+ jmp .Lwrcr3_\@
+
+.Lnoflush_\@:
+ movq \scratch_reg2, \scratch_reg
+ SET_NOFLUSH_BIT \scratch_reg
+
+.Lwrcr3_\@:
+ /* Flip the PGD and ASID to the user version */
+ orq $(PTI_SWITCH_MASK), \scratch_reg
mov \scratch_reg, %cr3
.Lend_\@:
.endm
+.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
+ pushq %rax
+ SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
+ popq %rax
+.endm
+
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
movq %cr3, \scratch_reg
movq \scratch_reg, \save_reg
/*
- * Is the switch bit zero? This means the address is
- * up in real PAGE_TABLE_ISOLATION patches in a moment.
+ * Is the "switch mask" all zero? That means that both of
+ * these are zero:
+ *
+ * 1. The user/kernel PCID bit, and
+ * 2. The user/kernel "bit" that points CR3 to the
+ * bottom half of the 8k PGD
+ *
+ * That indicates a kernel CR3 value, not a user CR3.
*/
testq $(PTI_SWITCH_MASK), \scratch_reg
jz .Ldone_\@
@@ -251,7 +295,9 @@ For 32-bit we have the following convent
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
-.macro SWITCH_TO_USER_CR3 scratch_reg:req
+.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
+.endm
+.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -23,7 +23,6 @@
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
-#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
@@ -40,6 +39,8 @@
#include <asm/frame.h>
#include <linux/err.h>
+#include "calling.h"
+
.code64
.section .entry.text, "ax"
@@ -406,7 +407,7 @@ syscall_return_via_sysret:
* We are on the trampoline stack. All regs except RDI are live.
* We can do future final exit work right here.
*/
- SWITCH_TO_USER_CR3 scratch_reg=%rdi
+ SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
popq %rdi
popq %rsp
@@ -744,7 +745,7 @@ GLOBAL(swapgs_restore_regs_and_return_to
* We can do future final exit work right here.
*/
- SWITCH_TO_USER_CR3 scratch_reg=%rdi
+ SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
/* Restore RDI. */
popq %rdi
@@ -857,7 +858,7 @@ native_irq_return_ldt:
*/
orq PER_CPU_VAR(espfix_stack), %rax
- SWITCH_TO_USER_CR3 scratch_reg=%rdi /* to user CR3 */
+ SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
SWAPGS /* to user GS */
popq %rdi /* Restore user RDI */
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -275,9 +275,9 @@ sysret32_from_system_call:
* switch until after the last reference to the process
* stack.
*
- * %r8 is zeroed before the sysret, thus safe to clobber.
+ * %r8/%r9 are zeroed before the sysret, thus safe to clobber.
*/
- SWITCH_TO_USER_CR3 scratch_reg=%r8
+ SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
xorq %r8, %r8
xorq %r9, %r9
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -38,6 +38,11 @@
#define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull)
#define CR3_PCID_MASK 0xFFFull
#define CR3_NOFLUSH BIT_ULL(63)
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define X86_CR3_PTI_SWITCH_BIT 11
+#endif
+
#else
/*
* CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -10,6 +10,8 @@
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
+#include <asm/pti.h>
+#include <asm/processor-flags.h>
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
@@ -24,24 +26,54 @@ static inline u64 inc_mm_tlb_gen(struct
/* There are 12 bits of space for ASIDS in CR3 */
#define CR3_HW_ASID_BITS 12
+
/*
* When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
* user/kernel switches
*/
-#define PTI_CONSUMED_ASID_BITS 0
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define PTI_CONSUMED_PCID_BITS 1
+#else
+# define PTI_CONSUMED_PCID_BITS 0
+#endif
+
+#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
-#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)
/*
* ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account
* for them being zero-based. Another -1 is because ASID 0 is reserved for
* use by non-PCID-aware users.
*/
-#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2)
+#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
+
+/*
+ * 6 because 6 should be plenty and struct tlb_state will fit in two cache
+ * lines.
+ */
+#define TLB_NR_DYN_ASIDS 6
static inline u16 kern_pcid(u16 asid)
{
VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
+ * Make sure that the dynamic ASID space does not conflict with the
+ * bit we are using to switch between user and kernel ASIDs.
+ */
+ BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT));
+
+ /*
+ * The ASID being passed in here should have respected the
+ * MAX_ASID_AVAILABLE and thus never have the switch bit set.
+ */
+ VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT));
+#endif
+ /*
+ * The dynamically-assigned ASIDs that get passed in are small
+ * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
+ * so do not bother to clear it.
+ *
* If PCID is on, ASID-aware code paths put the ASID+1 into the
* PCID bits. This serves two purposes. It prevents a nasty
* situation in which PCID-unaware code saves CR3, loads some other
@@ -95,12 +127,6 @@ static inline bool tlb_defer_switch_to_i
return !static_cpu_has(X86_FEATURE_PCID);
}
-/*
- * 6 because 6 should be plenty and struct tlb_state will fit in
- * two cache lines.
- */
-#define TLB_NR_DYN_ASIDS 6
-
struct tlb_context {
u64 ctx_id;
u64 tlb_gen;
@@ -146,6 +172,13 @@ struct tlb_state {
bool invalidate_other;
/*
+ * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
+ * the corresponding user PCID needs a flush next time we
+ * switch to it; see SWITCH_TO_USER_CR3.
+ */
+ unsigned short user_pcid_flush_mask;
+
+ /*
* Access to this CR4 shadow and to H/W CR4 is protected by
* disabling interrupts when modifying either one.
*/
@@ -250,14 +283,41 @@ static inline void cr4_set_bits_and_upda
extern void initialize_tlbstate_and_flush(void);
/*
+ * Given an ASID, flush the corresponding user ASID. We can delay this
+ * until the next time we switch to it.
+ *
+ * See SWITCH_TO_USER_CR3.
+ */
+static inline void invalidate_user_asid(u16 asid)
+{
+ /* There is no user ASID if address space separation is off */
+ if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+ return;
+
+ /*
+ * We only have a single ASID if PCID is off and the CR3
+ * write will have flushed it.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_PCID))
+ return;
+
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
+
+ __set_bit(kern_pcid(asid),
+ (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
+}
+
+/*
* flush the entire current user mapping
*/
static inline void __native_flush_tlb(void)
{
+ invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
/*
- * If current->mm == NULL then we borrow a mm which may change during a
- * task switch and therefore we must not be preempted while we write CR3
- * back:
+ * If current->mm == NULL then we borrow a mm which may change
+ * during a task switch and therefore we must not be preempted
+ * while we write CR3 back:
*/
preempt_disable();
native_write_cr3(__native_read_cr3());
@@ -301,7 +361,14 @@ static inline void __native_flush_tlb_gl
*/
static inline void __native_flush_tlb_single(unsigned long addr)
{
+ u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+
asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
+
+ invalidate_user_asid(loaded_mm_asid);
}
/*
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -78,7 +78,12 @@
#define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT)
#define X86_CR3_PCD_BIT 4 /* Page Cache Disable */
#define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT)
-#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */
+
+#define X86_CR3_PCID_BITS 12
+#define X86_CR3_PCID_MASK (_AC((1UL << X86_CR3_PCID_BITS) - 1, UL))
+
+#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */
+#define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT)
/*
* Intel CPU features in CR4
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -17,6 +17,7 @@
#include <asm/sigframe.h>
#include <asm/bootparam.h>
#include <asm/suspend.h>
+#include <asm/tlbflush.h>
#ifdef CONFIG_XEN
#include <xen/interface/xen.h>
@@ -94,6 +95,9 @@ void common(void) {
BLANK();
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+ /* TLB state for the entry code */
+ OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
+
/* Layout info for cpu_entry_area */
OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -855,7 +855,7 @@ void __init zone_sizes_init(void)
free_area_init_nodes(max_zone_pfns);
}
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.loaded_mm = &init_mm,
.next_asid = 1,
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -105,6 +105,7 @@ static void load_new_mm_cr3(pgd_t *pgdir
unsigned long new_mm_cr3;
if (need_flush) {
+ invalidate_user_asid(new_asid);
new_mm_cr3 = build_cr3(pgdir, new_asid);
} else {
new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
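Expressed in C, the flush decision SWITCH_TO_USER_CR3 makes on the exit path
is roughly the following (an illustrative sketch under the macros above; the
authoritative version is the assembly in calling.h, and the non-atomic
helper name here is made up for the sketch):

	/* Sketch: choose a flushing or non-flushing user CR3 value */
	static unsigned long sketch_user_cr3(unsigned long kernel_cr3,
					     unsigned long *flush_mask)
	{
		unsigned long cr3 = kernel_cr3;
		u16 asid = cr3 & 0x7FF;			/* mask ASID */

		if (!test_and_clear_bit(asid, flush_mask))
			cr3 |= X86_CR3_PCID_NOFLUSH;	/* keep TLB entries */

		return cr3 | PTI_SWITCH_MASK;	/* user PGD + user PCID bit */
	}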
Patches currently in stable-queue which might be from peterz(a)infradead.org are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Share entry text PMD
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-share-entry-text-pmd.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 6dc72c3cbca0580642808d677181cad4c6433893 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx(a)linutronix.de>
Date: Mon, 4 Dec 2017 15:07:47 +0100
Subject: x86/mm/pti: Share entry text PMD
From: Thomas Gleixner <tglx(a)linutronix.de>
commit 6dc72c3cbca0580642808d677181cad4c6433893 upstream.
Share the entry text PMD of the kernel mapping with the user space
mapping. If large pages are enabled this is a single PMD entry and at the
point where it is copied into the user page table the RW bit has not been
cleared yet. Clear it right away so the user space visible map becomes RX.
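The RW clearing is done while the PMD entry is copied; conceptually (a
sketch of just the copy step inside pti_clone_pmds(), not the complete
function):

	/* Sketch: clone one kernel PMD into the user table, minus RW */
	*target_pmd = pmd_clear_flags(*pmd, _PAGE_RW);	/* user copy is RX */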
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: Dave Hansen <dave.hansen(a)linux.intel.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/mm/pti.c | 10 ++++++++++
1 file changed, 10 insertions(+)
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -288,6 +288,15 @@ static void __init pti_clone_user_shared
}
/*
+ * Clone the populated PMDs of the entry and irqentry text and force it RO.
+ */
+static void __init pti_clone_entry_text(void)
+{
+ pti_clone_pmds((unsigned long) __entry_text_start,
+ (unsigned long) __irqentry_text_end, _PAGE_RW);
+}
+
+/*
* Initialize kernel page table isolation
*/
void __init pti_init(void)
@@ -298,4 +307,5 @@ void __init pti_init(void)
pr_info("enabled\n");
pti_clone_user_shared();
+ pti_clone_entry_text();
}
Patches currently in stable-queue which might be from tglx(a)linutronix.de are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Share cpu_entry_area with user space page tables
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From f7cfbee91559ca7e3e961a00ffac921208a115ad Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto(a)kernel.org>
Date: Mon, 4 Dec 2017 15:07:45 +0100
Subject: x86/mm/pti: Share cpu_entry_area with user space page tables
From: Andy Lutomirski <luto(a)kernel.org>
commit f7cfbee91559ca7e3e961a00ffac921208a115ad upstream.
Share the cpu entry area so the user space and kernel space page tables
have the same P4D page.
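Because both page tables then reference the same P4D page, anything mapped
into the cpu_entry_area afterwards becomes visible in the user page table
without further cloning; in effect (a sketch of what pti_clone_p4d() below
boils down to):

	/* Sketch: one shared p4d page behind two PGDs */
	*user_p4d = *kernel_p4d;	/* both walks hit the same tables */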
Signed-off-by: Andy Lutomirski <luto(a)kernel.org>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: Dave Hansen <dave.hansen(a)linux.intel.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/mm/pti.c | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+)
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -265,6 +265,29 @@ pti_clone_pmds(unsigned long start, unsi
}
/*
+ * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
+ * next-level entry on 5-level systems).
+ */
+static void __init pti_clone_p4d(unsigned long addr)
+{
+ p4d_t *kernel_p4d, *user_p4d;
+ pgd_t *kernel_pgd;
+
+ user_p4d = pti_user_pagetable_walk_p4d(addr);
+ kernel_pgd = pgd_offset_k(addr);
+ kernel_p4d = p4d_offset(kernel_pgd, addr);
+ *user_p4d = *kernel_p4d;
+}
+
+/*
+ * Clone the CPU_ENTRY_AREA into the user space visible page table.
+ */
+static void __init pti_clone_user_shared(void)
+{
+ pti_clone_p4d(CPU_ENTRY_AREA_BASE);
+}
+
+/*
* Initialize kernel page table isolation
*/
void __init pti_init(void)
@@ -273,4 +296,6 @@ void __init pti_init(void)
return;
pr_info("enabled\n");
+
+ pti_clone_user_shared();
}
Patches currently in stable-queue which might be from luto(a)kernel.org are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Prepare the x86/entry assembly code for entry/exit CR3 switching
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 8a09317b895f073977346779df52f67c1056d81d Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Mon, 4 Dec 2017 15:07:35 +0100
Subject: x86/mm/pti: Prepare the x86/entry assembly code for entry/exit CR3 switching
From: Dave Hansen <dave.hansen(a)linux.intel.com>
commit 8a09317b895f073977346779df52f67c1056d81d upstream.
PAGE_TABLE_ISOLATION needs to switch to a different CR3 value when it
enters the kernel and switch back when it exits. This essentially needs to
be done before leaving assembly code.
This is extra challenging because the switching context is tricky: the
registers that can be clobbered can vary. It is also hard to store things
on the stack because either there is an established ABI (ptregs) or the stack is
entirely unsafe to use.
Establish a set of macros that allow changing to the user and kernel CR3
values.
Interactions with SWAPGS:
Previous versions of the PAGE_TABLE_ISOLATION code relied on having
per-CPU scratch space to save/restore a register that can be used for the
CR3 MOV. The %GS register is used to index into our per-CPU space, so
SWAPGS *had* to be done before the CR3 switch. That scratch space is gone
now, but the semantic that SWAPGS must be done before the CR3 MOV is
retained. This is good to keep because it is not that hard to do and it
allows us to do things like add per-CPU debugging information.
What this does in the NMI code is worth pointing out. NMIs can interrupt
*any* context and they can also be nested with NMIs interrupting other
NMIs. The comments below ".Lnmi_from_kernel" explain the format of the
stack during this situation. Changing the format of this stack is hard.
Instead of storing the old CR3 value on the stack, this depends on the
*regular* register save/restore mechanism and then uses %r14 to keep CR3
during the NMI. It is callee-saved and will not be clobbered by the C NMI
handlers that get called.
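In C terms the pattern is roughly (an illustrative sketch; the real code is
the SAVE_AND_SWITCH_TO_KERNEL_CR3 / RESTORE_CR3 macro pair in calling.h,
with %r14 as the save register):

	/* Sketch: save CR3, switch to kernel CR3 if needed, restore */
	unsigned long saved_cr3 = __read_cr3();

	if (saved_cr3 & PTI_SWITCH_MASK)	/* interrupted user CR3? */
		write_cr3(saved_cr3 & ~PTI_SWITCH_MASK);

	/* ... the C NMI handler runs; must not clobber saved_cr3 ... */

	write_cr3(saved_cr3);			/* unconditional restore */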
[ PeterZ: ESPFIX optimization ]
Based-on-code-from: Andy Lutomirski <luto(a)kernel.org>
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Borislav Petkov <bp(a)suse.de>
Reviewed-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Cc: linux-mm(a)kvack.org
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/entry/calling.h | 66 +++++++++++++++++++++++++++++++++++++++
arch/x86/entry/entry_64.S | 45 +++++++++++++++++++++++---
arch/x86/entry/entry_64_compat.S | 24 +++++++++++++-
3 files changed, 128 insertions(+), 7 deletions(-)
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
+#include <asm/cpufeatures.h>
+#include <asm/page_types.h>
/*
@@ -187,6 +189,70 @@ For 32-bit we have the following convent
#endif
.endm
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+/* PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two halves: */
+#define PTI_SWITCH_MASK (1<<PAGE_SHIFT)
+
+.macro ADJUST_KERNEL_CR3 reg:req
+ /* Clear "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
+ andq $(~PTI_SWITCH_MASK), \reg
+.endm
+
+.macro ADJUST_USER_CR3 reg:req
+ /* Move CR3 up a page to the user page tables: */
+ orq $(PTI_SWITCH_MASK), \reg
+.endm
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+ mov %cr3, \scratch_reg
+ ADJUST_KERNEL_CR3 \scratch_reg
+ mov \scratch_reg, %cr3
+.endm
+
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+ mov %cr3, \scratch_reg
+ ADJUST_USER_CR3 \scratch_reg
+ mov \scratch_reg, %cr3
+.endm
+
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+ movq %cr3, \scratch_reg
+ movq \scratch_reg, \save_reg
+ /*
+ * Is the switch bit zero? This means the address is
+ * up in real PAGE_TABLE_ISOLATION patches in a moment.
+ */
+ testq $(PTI_SWITCH_MASK), \scratch_reg
+ jz .Ldone_\@
+
+ ADJUST_KERNEL_CR3 \scratch_reg
+ movq \scratch_reg, %cr3
+
+.Ldone_\@:
+.endm
+
+.macro RESTORE_CR3 save_reg:req
+ /*
+ * The CR3 write could be avoided when not changing its value,
+ * but would require a CR3 read *and* a scratch register.
+ */
+ movq \save_reg, %cr3
+.endm
+
+#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+.endm
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+.endm
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+.endm
+.macro RESTORE_CR3 save_reg:req
+.endm
+
+#endif
+
#endif /* CONFIG_X86_64 */
/*
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -164,6 +164,9 @@ ENTRY(entry_SYSCALL_64_trampoline)
/* Stash the user RSP. */
movq %rsp, RSP_SCRATCH
+ /* Note: using %rsp as a scratch reg. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
/* Load the top of the task stack into RSP */
movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
@@ -203,6 +206,10 @@ ENTRY(entry_SYSCALL_64)
*/
swapgs
+ /*
+ * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
+ * is not required to switch CR3.
+ */
movq %rsp, PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -399,6 +406,7 @@ syscall_return_via_sysret:
* We are on the trampoline stack. All regs except RDI are live.
* We can do future final exit work right here.
*/
+ SWITCH_TO_USER_CR3 scratch_reg=%rdi
popq %rdi
popq %rsp
@@ -736,6 +744,8 @@ GLOBAL(swapgs_restore_regs_and_return_to
* We can do future final exit work right here.
*/
+ SWITCH_TO_USER_CR3 scratch_reg=%rdi
+
/* Restore RDI. */
popq %rdi
SWAPGS
@@ -818,7 +828,9 @@ native_irq_return_ldt:
*/
pushq %rdi /* Stash user RDI */
- SWAPGS
+ SWAPGS /* to kernel GS */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
+
movq PER_CPU_VAR(espfix_waddr), %rdi
movq %rax, (0*8)(%rdi) /* user RAX */
movq (1*8)(%rsp), %rax /* user RIP */
@@ -834,7 +846,6 @@ native_irq_return_ldt:
/* Now RAX == RSP. */
andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */
- popq %rdi /* Restore user RDI */
/*
* espfix_stack[31:16] == 0. The page tables are set up such that
@@ -845,7 +856,11 @@ native_irq_return_ldt:
* still points to an RO alias of the ESPFIX stack.
*/
orq PER_CPU_VAR(espfix_stack), %rax
- SWAPGS
+
+ SWITCH_TO_USER_CR3 scratch_reg=%rdi /* to user CR3 */
+ SWAPGS /* to user GS */
+ popq %rdi /* Restore user RDI */
+
movq %rax, %rsp
UNWIND_HINT_IRET_REGS offset=8
@@ -945,6 +960,8 @@ ENTRY(switch_to_thread_stack)
UNWIND_HINT_FUNC
pushq %rdi
+ /* Need to switch before accessing the thread stack. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
movq %rsp, %rdi
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
@@ -1244,7 +1261,11 @@ ENTRY(paranoid_entry)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx, %ebx
-1: ret
+
+1:
+ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
+
+ ret
END(paranoid_entry)
/*
@@ -1266,6 +1287,7 @@ ENTRY(paranoid_exit)
testl %ebx, %ebx /* swapgs needed? */
jnz .Lparanoid_exit_no_swapgs
TRACE_IRQS_IRETQ
+ RESTORE_CR3 save_reg=%r14
SWAPGS_UNSAFE_STACK
jmp .Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
@@ -1293,6 +1315,8 @@ ENTRY(error_entry)
* from user mode due to an IRET fault.
*/
SWAPGS
+ /* We have user CR3. Change to kernel CR3. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
.Lerror_entry_from_usermode_after_swapgs:
/* Put us onto the real thread stack. */
@@ -1339,6 +1363,7 @@ ENTRY(error_entry)
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
jmp .Lerror_entry_done
.Lbstep_iret:
@@ -1348,10 +1373,11 @@ ENTRY(error_entry)
.Lerror_bad_iret:
/*
- * We came from an IRET to user mode, so we have user gsbase.
- * Switch to kernel gsbase:
+ * We came from an IRET to user mode, so we have user
+ * gsbase and CR3. Switch to kernel gsbase and CR3:
*/
SWAPGS
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
/*
* Pretend that the exception came from user mode: set up pt_regs
@@ -1383,6 +1409,10 @@ END(error_exit)
/*
* Runs on exception stack. Xen PV does not go through this path at all,
* so we can use real assembly here.
+ *
+ * Registers:
+ * %r14: Used to save/restore the CR3 of the interrupted context
+ * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
*/
ENTRY(nmi)
UNWIND_HINT_IRET_REGS
@@ -1446,6 +1476,7 @@ ENTRY(nmi)
swapgs
cld
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
UNWIND_HINT_IRET_REGS base=%rdx offset=8
@@ -1698,6 +1729,8 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi
+ RESTORE_CR3 save_reg=%r14
+
testl %ebx, %ebx /* swapgs needed? */
jnz nmi_restore
nmi_swapgs:
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -49,6 +49,10 @@
ENTRY(entry_SYSENTER_compat)
/* Interrupts are off on entry. */
SWAPGS
+
+ /* We are about to clobber %rsp anyway, clobbering here is OK */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
/*
@@ -216,6 +220,12 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram
pushq $0 /* pt_regs->r15 = 0 */
/*
+ * We just saved %rdi so it is safe to clobber. It is not
+ * preserved during the C calls inside TRACE_IRQS_OFF anyway.
+ */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+
+ /*
* User mode is traced as though IRQs are on, and SYSENTER
* turned them off.
*/
@@ -256,10 +266,22 @@ sysret32_from_system_call:
* when the system call started, which is already known to user
* code. We zero R8-R10 to avoid info leaks.
*/
+ movq RSP-ORIG_RAX(%rsp), %rsp
+
+ /*
+ * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
+ * on the process stack which is not mapped to userspace and
+ * not readable after we SWITCH_TO_USER_CR3. Delay the CR3
+ * switch until after the last reference to the process
+ * stack.
+ *
+ * %r8 is zeroed before the sysret, thus safe to clobber.
+ */
+ SWITCH_TO_USER_CR3 scratch_reg=%r8
+
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r10
- movq RSP-ORIG_RAX(%rsp), %rsp
swapgs
sysretl
END(entry_SYSCALL_compat)
Patches currently in stable-queue which might be from dave.hansen(a)linux.intel.com are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Populate user PGD
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-populate-user-pgd.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From fc2fbc8512ed08d1de7720936fd7d2e4ce02c3a2 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Mon, 4 Dec 2017 15:07:40 +0100
Subject: x86/mm/pti: Populate user PGD
From: Dave Hansen <dave.hansen(a)linux.intel.com>
commit fc2fbc8512ed08d1de7720936fd7d2e4ce02c3a2 upstream.
In clone_pgd_range() copy the init user PGDs which cover the kernel half of
the address space, so a process has all the required kernel mappings
visible.
[ tglx: Split out from the big kaiser dump and folded Andy's simplification ]
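For illustration, the typical call site copies the kernel half into a
freshly allocated PGD like this (a sketch assuming the pgd_ctor()-style
constants; with PTI enabled the same call now fills the user PGD too):

	/* Sketch: propagate the kernel-half entries into a new pgd */
	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);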
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Borislav Petkov <bp(a)suse.de>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/include/asm/pgtable.h | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1125,7 +1125,14 @@ static inline int pud_write(pud_t pud)
*/
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
- memcpy(dst, src, count * sizeof(pgd_t));
+ memcpy(dst, src, count * sizeof(pgd_t));
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
+ /* Clone the user space pgd as well */
+ memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
+ count * sizeof(pgd_t));
+#endif
}
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
Patches currently in stable-queue which might be from dave.hansen(a)linux.intel.com are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Map ESPFIX into user space
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-map-espfix-into-user-space.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 4b6bbe95b87966ba08999574db65c93c5e925a36 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto(a)kernel.org>
Date: Fri, 15 Dec 2017 22:08:18 +0100
Subject: x86/mm/pti: Map ESPFIX into user space
From: Andy Lutomirski <luto(a)kernel.org>
commit 4b6bbe95b87966ba08999574db65c93c5e925a36 upstream.
Map the ESPFIX pages into user space when PTI is enabled.
Signed-off-by: Andy Lutomirski <luto(a)kernel.org>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: Dave Hansen <dave.hansen(a)linux.intel.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Kees Cook <keescook(a)chromium.org>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/mm/pti.c | 11 +++++++++++
1 file changed, 11 insertions(+)
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -288,6 +288,16 @@ static void __init pti_clone_user_shared
}
/*
+ * Clone the ESPFIX P4D into the user space visible page table
+ */
+static void __init pti_setup_espfix64(void)
+{
+#ifdef CONFIG_X86_ESPFIX64
+ pti_clone_p4d(ESPFIX_BASE_ADDR);
+#endif
+}
+
+/*
* Clone the populated PMDs of the entry and irqentry text and force it RO.
*/
static void __init pti_clone_entry_text(void)
@@ -308,4 +318,5 @@ void __init pti_init(void)
pti_clone_user_shared();
pti_clone_entry_text();
+ pti_setup_espfix64();
}
Patches currently in stable-queue which might be from luto(a)kernel.org are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Force entry through trampoline when PTI active
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 8d4b067895791ab9fdb1aadfc505f64d71239dd2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx(a)linutronix.de>
Date: Mon, 4 Dec 2017 15:07:43 +0100
Subject: x86/mm/pti: Force entry through trampoline when PTI active
From: Thomas Gleixner <tglx(a)linutronix.de>
commit 8d4b067895791ab9fdb1aadfc505f64d71239dd2 upstream.
Force the entry through the trampoline only when PTI is active. Otherwise
go through the normal entry code.
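In effect the selection amounts to (a sketch of the syscall_init() change;
the hunk below is the authoritative version):

	/* Sketch: pick the SYSCALL entry point based on PTI state */
	wrmsrl(MSR_LSTAR, static_cpu_has(X86_FEATURE_PTI) ?
			  SYSCALL64_entry_trampoline :
			  (unsigned long)entry_SYSCALL_64);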
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Borislav Petkov <bp(a)suse.de>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: Dave Hansen <dave.hansen(a)linux.intel.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/kernel/cpu/common.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1339,7 +1339,10 @@ void syscall_init(void)
(entry_SYSCALL_64_trampoline - _entry_trampoline);
wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
- wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
+ if (static_cpu_has(X86_FEATURE_PTI))
+ wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
+ else
+ wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
#ifdef CONFIG_IA32_EMULATION
wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
Patches currently in stable-queue which might be from tglx(a)linutronix.de are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Disable global pages if PAGE_TABLE_ISOLATION=y
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From c313ec66317d421fb5768d78c56abed2dc862264 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Mon, 4 Dec 2017 15:07:34 +0100
Subject: x86/mm/pti: Disable global pages if PAGE_TABLE_ISOLATION=y
From: Dave Hansen <dave.hansen(a)linux.intel.com>
commit c313ec66317d421fb5768d78c56abed2dc862264 upstream.
Global pages stay in the TLB across context switches. Since all contexts
share the same kernel mapping, these mappings are marked as global pages
so kernel entries in the TLB are not flushed out on a context switch.
But, even having these entries in the TLB opens up something that an
attacker can use, such as the double-page-fault attack:
http://www.ieee-security.org/TC/SP2013/papers/4977a191.pdf
That means that even when PAGE_TABLE_ISOLATION switches page tables
on return to user space, the global pages would stay in the TLB cache.
Disable global pages so that kernel TLB entries can be flushed before
returning to user space. This way, all accesses to kernel addresses from
userspace result in a TLB miss independent of the existence of a kernel
mapping.
Suppress global pages via the __supported_pte_mask. The user space
mappings set PAGE_GLOBAL for the minimal kernel mappings which are
required for entry/exit. These mappings are set up manually so the
filtering does not take place.
[ The __supported_pte_mask simplification was written by Thomas Gleixner. ]
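One place the filtering takes effect is the common pgprot helper, which
masks every present mapping with __supported_pte_mask (simplified from
arch/x86/include/asm/pgtable.h and shown only to illustrate the mechanism;
it is not part of this patch):

	static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
	{
		pgprotval_t protval = pgprot_val(pgprot);

		if (protval & _PAGE_PRESENT)
			protval &= __supported_pte_mask;

		return protval;	/* _PAGE_GLOBAL is filtered with PTI on */
	}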
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Borislav Petkov <bp(a)suse.de>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Cc: linux-mm(a)kvack.org
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/mm/init.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,6 +161,12 @@ struct map_range {
static int page_size_mask;
+static void enable_global_pages(void)
+{
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ __supported_pte_mask |= _PAGE_GLOBAL;
+}
+
static void __init probe_page_size_mask(void)
{
/*
@@ -179,11 +185,11 @@ static void __init probe_page_size_mask(
cr4_set_bits_and_update_boot(X86_CR4_PSE);
/* Enable PGE if available */
+ __supported_pte_mask &= ~_PAGE_GLOBAL;
if (boot_cpu_has(X86_FEATURE_PGE)) {
cr4_set_bits_and_update_boot(X86_CR4_PGE);
- __supported_pte_mask |= _PAGE_GLOBAL;
- } else
- __supported_pte_mask &= ~_PAGE_GLOBAL;
+ enable_global_pages();
+ }
/* Enable 1 GB linear kernel mappings if available: */
if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
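For illustration, a minimal user-space sketch of why clearing _PAGE_GLOBAL
from __supported_pte_mask is enough: when the kernel builds a present
mapping it masks the requested protection bits with __supported_pte_mask
(see massage_pgprot() in arch/x86/include/asm/pgtable.h), so a cleared
global bit is silently dropped everywhere the filtering applies. The flag
values below mirror arch/x86/include/asm/pgtable_types.h; make_pte_flags()
is an illustrative stand-in, not a kernel API.
#include <stdio.h>
#include <stdint.h>

#define _PAGE_PRESENT (1ULL << 0)
#define _PAGE_RW      (1ULL << 1)
#define _PAGE_GLOBAL  (1ULL << 8)

static uint64_t supported_pte_mask = ~0ULL;

/* Models massage_pgprot(): present mappings keep only supported bits. */
static uint64_t make_pte_flags(uint64_t requested)
{
	if (requested & _PAGE_PRESENT)
		requested &= supported_pte_mask;
	return requested;
}

int main(void)
{
	uint64_t want = _PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL;

	/* With PTI enabled, enable_global_pages() never runs, so the
	 * global bit stays cleared in the mask... */
	supported_pte_mask &= ~_PAGE_GLOBAL;

	/* ...and every mapping built afterwards silently loses it:
	 * prints 0x3 (PRESENT|RW), not 0x103. */
	printf("flags: %#llx\n", (unsigned long long)make_pte_flags(want));
	return 0;
}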
Patches currently in stable-queue which might be from dave.hansen(a)linux.intel.com are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Allow NX poison to be set in p4d/pgd
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From 1c4de1ff4fe50453b968579ee86fac3da80dd783 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Mon, 4 Dec 2017 15:07:38 +0100
Subject: x86/mm/pti: Allow NX poison to be set in p4d/pgd
From: Dave Hansen <dave.hansen(a)linux.intel.com>
commit 1c4de1ff4fe50453b968579ee86fac3da80dd783 upstream.
With PAGE_TABLE_ISOLATION the user portion of the kernel page tables is
poisoned with the NX bit, so that if the entry code ever exits with the
kernel page tables still selected in CR3, user space crashes immediately
instead of running with the kernel mappings exposed.
But setting NX in a p4d/pgd entry trips the p4d_bad()/pgd_bad() sanity
checks. Teach those checks to ignore the NX bit when
PAGE_TABLE_ISOLATION is enabled.
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Borislav Petkov <bp(a)suse.de>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Cc: linux-kernel(a)vger.kernel.org
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/include/asm/pgtable.h | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -846,7 +846,12 @@ static inline pud_t *pud_offset(p4d_t *p
static inline int p4d_bad(p4d_t p4d)
{
- return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+ unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
+
+ if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+ ignore_flags |= _PAGE_NX;
+
+ return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
@@ -880,7 +885,12 @@ static inline p4d_t *p4d_offset(pgd_t *p
static inline int pgd_bad(pgd_t pgd)
{
- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+ unsigned long ignore_flags = _PAGE_USER;
+
+ if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+ ignore_flags |= _PAGE_NX;
+
+ return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}
static inline int pgd_none(pgd_t pgd)
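To see concretely why the checks must now tolerate NX, here is a minimal
user-space model. The flag values mirror x86; poison_user_pgd() is
illustrative only (the real poisoning is added by the user-PGD population
patch elsewhere in this queue), and pgd_bad() below models the fixed check.
#include <assert.h>
#include <stdint.h>

#define _PAGE_PRESENT  (1ULL << 0)
#define _PAGE_RW       (1ULL << 1)
#define _PAGE_USER     (1ULL << 2)
#define _PAGE_ACCESSED (1ULL << 5)
#define _PAGE_DIRTY    (1ULL << 6)
#define _PAGE_NX       (1ULL << 63)
#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

/* A stray exit to user space with kernel page tables in CR3 now faults
 * on the first instruction fetch instead of executing. */
static uint64_t poison_user_pgd(uint64_t pgd)
{
	return pgd | _PAGE_NX;
}

static int pgd_bad(uint64_t pgd)
{
	/* Without _PAGE_NX in ignore_flags, poisoned entries are "bad". */
	uint64_t ignore_flags = _PAGE_USER | _PAGE_NX;

	return (pgd & ~ignore_flags) != _KERNPG_TABLE;
}

int main(void)
{
	uint64_t pgd = _KERNPG_TABLE | _PAGE_USER;

	assert(!pgd_bad(pgd));
	assert(!pgd_bad(poison_user_pgd(pgd))); /* trips without the fix */
	return 0;
}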
Patches currently in stable-queue which might be from dave.hansen(a)linux.intel.com are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Allocate a separate user PGD
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-allocate-a-separate-user-pgd.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From d9e9a6418065bb376e5de8d93ce346939b9a37a6 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Mon, 4 Dec 2017 15:07:39 +0100
Subject: x86/mm/pti: Allocate a separate user PGD
From: Dave Hansen <dave.hansen(a)linux.intel.com>
commit d9e9a6418065bb376e5de8d93ce346939b9a37a6 upstream.
Kernel page table isolation requires two PGDs: one for the kernel, which
contains the full kernel mapping plus the user space mappings, and one
for user space, which contains the user space mappings plus the minimal
set of kernel mappings the architecture requires to transition to and
from user space.
Add the necessary preliminaries.
[ tglx: Split out from the big kaiser dump. EFI fixup from Kirill ]
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Borislav Petkov <bp(a)suse.de>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/include/asm/pgalloc.h | 11 +++++++++++
arch/x86/kernel/head_64.S | 30 +++++++++++++++++++++++++++---
arch/x86/mm/pgtable.c | 5 +++--
arch/x86/platform/efi/efi_64.c | 5 ++++-
4 files changed, 45 insertions(+), 6 deletions(-)
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -30,6 +30,17 @@ static inline void paravirt_release_p4d(
*/
extern gfp_t __userpte_alloc_gfp;
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Instead of one PGD, we acquire two PGDs. Being order-1, it is
+ * both 8k in size and 8k-aligned. That lets us just flip bit 12
+ * in a pointer to swap between the two 4k halves.
+ */
+#define PGD_ALLOCATION_ORDER 1
+#else
+#define PGD_ALLOCATION_ORDER 0
+#endif
+
/*
* Allocate and free page tables.
*/
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -341,6 +341,27 @@ GLOBAL(early_recursion_flag)
.balign PAGE_SIZE; \
GLOBAL(name)
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Each PGD needs to be 8k long and 8k aligned. We do not
+ * ever go out to userspace with these, so we do not
+ * strictly *need* the second page, but this allows us to
+ * have a single set_pgd() implementation that does not
+ * need to worry about whether it has 4k or 8k to work
+ * with.
+ *
+ * This ensures PGDs are 8k long:
+ */
+#define PTI_USER_PGD_FILL 512
+/* This ensures they are 8k-aligned: */
+#define NEXT_PGD_PAGE(name) \
+ .balign 2 * PAGE_SIZE; \
+GLOBAL(name)
+#else
+#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
+#define PTI_USER_PGD_FILL 0
+#endif
+
/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT) \
i = 0 ; \
@@ -350,13 +371,14 @@ GLOBAL(name)
.endr
__INITDATA
-NEXT_PAGE(early_top_pgt)
+NEXT_PGD_PAGE(early_top_pgt)
.fill 511,8,0
#ifdef CONFIG_X86_5LEVEL
.quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#else
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#endif
+ .fill PTI_USER_PGD_FILL,8,0
NEXT_PAGE(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -364,13 +386,14 @@ NEXT_PAGE(early_dynamic_pgts)
.data
#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
-NEXT_PAGE(init_top_pgt)
+NEXT_PGD_PAGE(init_top_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.org init_top_pgt + PGD_PAGE_OFFSET*8, 0
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.org init_top_pgt + PGD_START_KERNEL*8, 0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+ .fill PTI_USER_PGD_FILL,8,0
NEXT_PAGE(level3_ident_pgt)
.quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
@@ -381,8 +404,9 @@ NEXT_PAGE(level2_ident_pgt)
*/
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#else
-NEXT_PAGE(init_top_pgt)
+NEXT_PGD_PAGE(init_top_pgt)
.fill 512,8,0
+ .fill PTI_USER_PGD_FILL,8,0
#endif
#ifdef CONFIG_X86_5LEVEL
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -355,14 +355,15 @@ static inline void _pgd_free(pgd_t *pgd)
kmem_cache_free(pgd_cache, pgd);
}
#else
+
static inline pgd_t *_pgd_alloc(void)
{
- return (pgd_t *)__get_free_page(PGALLOC_GFP);
+ return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}
static inline void _pgd_free(pgd_t *pgd)
{
- free_page((unsigned long)pgd);
+ free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -195,6 +195,9 @@ static pgd_t *efi_pgd;
* because we want to avoid inserting EFI region mappings (EFI_VA_END
* to EFI_VA_START) into the standard kernel page tables. Everything
* else can be shared, see efi_sync_low_kernel_mappings().
+ *
+ * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
+ * allocation.
*/
int __init efi_alloc_page_tables(void)
{
@@ -207,7 +210,7 @@ int __init efi_alloc_page_tables(void)
return 0;
gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
- efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
+ efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
if (!efi_pgd)
return -ENOMEM;
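As a user-space sketch of what the order-1 allocation buys: because the
PGD pair is both 8k in size and 8k-aligned, flipping bit 12 (i.e. XORing
the pointer with PAGE_SIZE) toggles between the kernel half and the user
half without any bookkeeping. This models only the pointer arithmetic;
aligned_alloc() stands in for __get_free_pages(gfp, PGD_ALLOCATION_ORDER),
and the helper that does this in the real series, kernel_to_user_pgdp(),
is added by the mapping-helper patch in this queue.
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* 8k allocation, 8k-aligned: the order-1 property. */
	char *kernel_pgd = aligned_alloc(2 * PAGE_SIZE, 2 * PAGE_SIZE);
	assert(kernel_pgd);

	/* Flip bit 12 to reach the user half, flip again to get back. */
	char *user_pgd = (char *)((uintptr_t)kernel_pgd ^ PAGE_SIZE);
	assert(user_pgd == kernel_pgd + PAGE_SIZE);
	assert((char *)((uintptr_t)user_pgd ^ PAGE_SIZE) == kernel_pgd);

	free(kernel_pgd);
	return 0;
}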
Patches currently in stable-queue which might be from dave.hansen(a)linux.intel.com are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch