This is a note to let you know that I've just added the patch titled
x86/pti: Map the vsyscall page if needed
to the 4.14-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=su...
The filename of the patch is:
     x86-pti-map-the-vsyscall-page-if-needed.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From 85900ea51577e31b186e523c8f4e068c79ecc7d3 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Tue, 12 Dec 2017 07:56:42 -0800
Subject: x86/pti: Map the vsyscall page if needed

From: Andy Lutomirski <luto@kernel.org>
commit 85900ea51577e31b186e523c8f4e068c79ecc7d3 upstream.
Make VSYSCALLs work fully in PTI mode by mapping them properly to the user space visible page tables.
[ tglx: Hide unused functions (Patch by Arnd Bergmann) ]
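For reference, the legacy ABI this has to keep alive looks like the sketch
below. It is not part of the patch; the fixed address and the time() slot
offset are the long-standing legacy vsyscall layout, and the snippet only
illustrates why the vsyscall page must be reachable from the user-visible
page tables once PTI is enabled (with vsyscall=none it would fault
regardless of PTI):

/*
 * Illustrative only -- not part of the patch.  Legacy binaries call
 * time() through a fixed, kernel-provided address, so the vsyscall
 * page has to be reachable from the user page tables under PTI.
 * The constants are the long-standing legacy vsyscall layout.
 */
#include <stdio.h>
#include <time.h>

#define VSYSCALL_ADDR	0xffffffffff600000UL
#define VSYSCALL_TIME	(VSYSCALL_ADDR + 0x400)	/* legacy time() slot */

int main(void)
{
	time_t (*vtime)(time_t *) = (time_t (*)(time_t *))VSYSCALL_TIME;

	/*
	 * With vsyscall=emulate (the default) this call faults in the
	 * vsyscall page and the kernel emulates it; with vsyscall=native
	 * it executes directly.  Either way the page must be visible to
	 * user space, which is what this patch restores under PTI.
	 */
	printf("vsyscall time() returned %ld\n", (long)vtime(NULL));
	return 0;
}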
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/entry/vsyscall/vsyscall_64.c |    6 +--
 arch/x86/include/asm/vsyscall.h       |    1 
 arch/x86/mm/pti.c                     |   65 ++++++++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+), 3 deletions(-)

--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -344,14 +344,14 @@ int in_gate_area_no_mm(unsigned long add
  * vsyscalls but leave the page not present. If so, we skip calling
  * this.
  */
-static void __init set_vsyscall_pgtable_user_bits(void)
+void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
-	pgd = pgd_offset_k(VSYSCALL_ADDR);
+	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
 	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
 	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
 #if CONFIG_PGTABLE_LEVELS >= 5
@@ -373,7 +373,7 @@ void __init map_vsyscall(void)
 			     vsyscall_mode == NATIVE
 			     ? PAGE_KERNEL_VSYSCALL
 			     : PAGE_KERNEL_VVAR);
-		set_vsyscall_pgtable_user_bits();
+		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
 	}
 
 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -7,6 +7,7 @@
 
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
 extern void map_vsyscall(void);
+extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
 
 /*
  * Called on instruction fetch fault in vsyscall page.
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -38,6 +38,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/hypervisor.h>
+#include <asm/vsyscall.h>
 #include <asm/cmdline.h>
 #include <asm/pti.h>
 #include <asm/pgtable.h>
@@ -223,6 +224,69 @@ static pmd_t *pti_user_pagetable_walk_pm
 	return pmd_offset(pud, address);
 }
 
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+/*
+ * Walk the shadow copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down. Does not support large pages.
+ *
+ * Note: this is only used when mapping *new* kernel data into the
+ * user/shadow page tables. It is never used for userspace data.
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+{
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+	pte_t *pte;
+
+	/* We can't do anything sensible if we hit a large mapping. */
+	if (pmd_large(*pmd)) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (pmd_none(*pmd)) {
+		unsigned long new_pte_page = __get_free_page(gfp);
+		if (!new_pte_page)
+			return NULL;
+
+		if (pmd_none(*pmd)) {
+			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+			new_pte_page = 0;
+		}
+		if (new_pte_page)
+			free_page(new_pte_page);
+	}
+
+	pte = pte_offset_kernel(pmd, address);
+	if (pte_flags(*pte) & _PAGE_USER) {
+		WARN_ONCE(1, "attempt to walk to user pte\n");
+		return NULL;
+	}
+	return pte;
+}
+
+static void __init pti_setup_vsyscall(void)
+{
+	pte_t *pte, *target_pte;
+	unsigned int level;
+
+	pte = lookup_address(VSYSCALL_ADDR, &level);
+	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+		return;
+
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	if (WARN_ON(!target_pte))
+		return;
+
+	*target_pte = *pte;
+	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
+}
+#else
+static void __init pti_setup_vsyscall(void) { }
+#endif
+
 static void __init
 pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
 {
@@ -319,4 +383,5 @@ void __init pti_init(void)
 	pti_clone_user_shared();
 	pti_clone_entry_text();
 	pti_setup_espfix64();
+	pti_setup_vsyscall();
 }
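A quick way to sanity-check a patched kernel (again only an illustrative
sketch, not part of the patch or of the kernel's selftests): with vsyscall
emulation enabled, the vsyscall page should still show up in every
process's address space, PTI or not:

/*
 * Illustrative only -- not part of the patch.  Print the "[vsyscall]"
 * line from /proc/self/maps; it should still be present on a kernel
 * built with CONFIG_PAGE_TABLE_ISOLATION=y and vsyscall emulation on.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps) {
		perror("fopen(/proc/self/maps)");
		return 1;
	}

	while (fgets(line, sizeof(line), maps)) {
		/* e.g. "ffffffffff600000-ffffffffff601000 --xp ... [vsyscall]" */
		if (strstr(line, "[vsyscall]"))
			fputs(line, stdout);
	}

	fclose(maps);
	return 0;
}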
Patches currently in stable-queue which might be from luto@kernel.org are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch