This is a note to let you know that I've just added the patch titled
x86/mm/pat: Ensure cpa->pfn only contains page frame numbers
to the 4.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git%3Ba=su...

The filename of the patch is:
    x86-mm-pat-ensure-cpa-pfn-only-contains-page-frame-numbers.patch
and it can be found in the queue-4.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree, please let stable@vger.kernel.org know about it.
From edc3b9129cecd0f0857112136f5b8b1bc1d45918 Mon Sep 17 00:00:00 2001
From: Matt Fleming <matt@codeblueprint.co.uk>
Date: Fri, 27 Nov 2015 21:09:31 +0000
Subject: x86/mm/pat: Ensure cpa->pfn only contains page frame numbers
From: Matt Fleming <matt@codeblueprint.co.uk>
commit edc3b9129cecd0f0857112136f5b8b1bc1d45918 upstream.
The x86 pageattr code is confused about the data that is stored in cpa->pfn: sometimes it is treated as a page frame number, sometimes as an unshifted physical address, and in one place as a pte.
The result of this is that the mapping functions do not map the intended physical address.
This isn't a problem in practice because most of the addresses we're mapping in the EFI code paths are already mapped in 'trampoline_pgd' and so the pageattr mapping functions don't actually do anything in this case. But when we move to using a separate page table for the EFI runtime this will be an issue.
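As a quick illustration (not part of the patch), a page frame number is simply the physical address shifted down by PAGE_SHIFT, so advancing by one page means "+1" on a pfn but "+PAGE_SIZE" on an address -- exactly the unit mix-up described above. A minimal stand-alone sketch of that arithmetic, assuming the usual 4K pages (PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long phys = 0x1c0a000UL;        /* a physical address    */
        unsigned long pfn  = phys >> PAGE_SHIFT; /* its page frame number */

        /* pfn_pte() wants a pfn; shifting it back up recovers the address. */
        printf("phys=0x%lx pfn=0x%lx back=0x%lx\n",
               phys, pfn, pfn << PAGE_SHIFT);

        /* Advancing by one page: +PAGE_SIZE on an address, but +1 on a pfn. */
        printf("next page: phys=0x%lx pfn=0x%lx\n",
               phys + PAGE_SIZE, pfn + 1);

        return 0;
}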
Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: linux-efi@vger.kernel.org
Link: http://lkml.kernel.org/r/1448658575-17029-3-git-send-email-matt@codeblueprin...
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: "Ghannam, Yazen" <Yazen.Ghannam@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/mm/pageattr.c         |   17 ++++++-----------
 arch/x86/platform/efi/efi_64.c |   16 ++++++++++------
 2 files changed, 16 insertions(+), 17 deletions(-)
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -911,15 +911,10 @@ static void populate_pte(struct cpa_data
         pte = pte_offset_kernel(pmd, start);
 
         while (num_pages-- && start < end) {
-
-                /* deal with the NX bit */
-                if (!(pgprot_val(pgprot) & _PAGE_NX))
-                        cpa->pfn &= ~_PAGE_NX;
-
-                set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+                set_pte(pte, pfn_pte(cpa->pfn, pgprot));
 
                 start    += PAGE_SIZE;
-                cpa->pfn += PAGE_SIZE;
+                cpa->pfn++;
                 pte++;
         }
 }
@@ -975,11 +970,11 @@ static int populate_pmd(struct cpa_data
 
                 pmd = pmd_offset(pud, start);
 
-                set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+                set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
                                    massage_pgprot(pmd_pgprot)));
 
                 start     += PMD_SIZE;
-                cpa->pfn  += PMD_SIZE;
+                cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
                 cur_pages += PMD_SIZE >> PAGE_SHIFT;
         }
 
@@ -1048,11 +1043,11 @@ static int populate_pud(struct cpa_data
          * Map everything starting from the Gb boundary, possibly with 1G pages
          */
         while (end - start >= PUD_SIZE) {
-                set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+                set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
                                    massage_pgprot(pud_pgprot)));
 
                 start     += PUD_SIZE;
-                cpa->pfn  += PUD_SIZE;
+                cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
                 cur_pages += PUD_SIZE >> PAGE_SHIFT;
                 pud++;
         }
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -143,7 +143,7 @@ void efi_sync_low_kernel_mappings(void)
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-        unsigned long text;
+        unsigned long pfn, text;
         struct page *page;
         unsigned npages;
         pgd_t *pgd;
@@ -160,7 +160,8 @@ int __init efi_setup_page_tables(unsigne
          * and ident-map those pages containing the map before calling
          * phys_efi_set_virtual_address_map().
          */
-        if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+        pfn = pa_memmap >> PAGE_SHIFT;
+        if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
                 pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
                 return 1;
         }
@@ -185,8 +186,9 @@ int __init efi_setup_page_tables(unsigne
 
         npages = (_end - _text) >> PAGE_SHIFT;
         text = __pa(_text);
+        pfn = text >> PAGE_SHIFT;
 
-        if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
+        if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
                 pr_err("Failed to map kernel text 1:1\n");
                 return 1;
         }
@@ -204,12 +206,14 @@ void __init efi_cleanup_page_tables(unsi
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
 {
         pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-        unsigned long pf = 0;
+        unsigned long flags = 0;
+        unsigned long pfn;
 
         if (!(md->attribute & EFI_MEMORY_WB))
-                pf |= _PAGE_PCD;
+                flags |= _PAGE_PCD;
 
-        if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+        pfn = md->phys_addr >> PAGE_SHIFT;
+        if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
                 pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
                         md->phys_addr, va);
 }
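The same unit applies at the large-page levels in the populate_pmd()/populate_pud() hunks above: once cpa->pfn holds a page frame number, a 2M or 1G step advances it by the number of 4K pages covered (PMD_SIZE >> PAGE_SHIFT, PUD_SIZE >> PAGE_SHIFT) rather than by the byte size. A small stand-alone sketch of that arithmetic (illustrative only, not from the patch, assuming 4K pages and the x86-64 shifts):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21                      /* 2M large pages on x86-64 */
#define PUD_SHIFT  30                      /* 1G large pages on x86-64 */
#define PMD_SIZE   (1UL << PMD_SHIFT)
#define PUD_SIZE   (1UL << PUD_SHIFT)

int main(void)
{
        unsigned long pfn = 0x100000UL;    /* maps physical address 4G */

        /* One 2M mapping covers 512 4K pages, one 1G mapping covers 262144. */
        printf("after a PMD step: pfn=0x%lx (+%lu pages)\n",
               pfn + (PMD_SIZE >> PAGE_SHIFT), PMD_SIZE >> PAGE_SHIFT);
        printf("after a PUD step: pfn=0x%lx (+%lu pages)\n",
               pfn + (PUD_SIZE >> PAGE_SHIFT), PUD_SIZE >> PAGE_SHIFT);

        return 0;
}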
Patches currently in stable-queue which might be from matt@codeblueprint.co.uk are
queue-4.4/x86-mm-pat-ensure-cpa-pfn-only-contains-page-frame-numbers.patch
queue-4.4/x86-efi-hoist-page-table-switching-code-into-efi_call_virt.patch
queue-4.4/x86-efi-build-our-own-page-table-structures.patch