This is a note to let you know that I've just added the patch titled
x86/mm: Pass flush_tlb_info to flush_tlb_others() etc
to the 4.9-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=su...
The filename of the patch is:
     x86-mm-pass-flush_tlb_info-to-flush_tlb_others-etc.patch
and it can be found in the queue-4.9 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree, please let stable@vger.kernel.org know about it.
From a2055abe9c6789cedef29abbdaa488a087faccc3 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Sun, 28 May 2017 10:00:10 -0700
Subject: x86/mm: Pass flush_tlb_info to flush_tlb_others() etc

From: Andy Lutomirski <luto@kernel.org>
commit a2055abe9c6789cedef29abbdaa488a087faccc3 upstream.
Rather than passing all the contents of flush_tlb_info to flush_tlb_others(), pass a pointer to the structure directly. For consistency, this also removes the unnecessary cpu parameter from uv_flush_tlb_others() to make its signature match the other *flush_tlb_others() functions.
This serves two purposes:
- It will dramatically simplify future patches that change struct flush_tlb_info, which I'm planning to do.
- struct flush_tlb_info is an adequate description of what to do for a local flush, too, so by reusing it we can remove duplicated code between local and remote flushes in a future patch (see the sketch below).
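To make the shape of the change easier to see, here is a minimal sketch distilled from the diff below. The struct, the native_flush_tlb_others() signatures, and the call site are taken from the patch itself; presenting them as bare standalone declarations is a simplification for illustration, not buildable kernel code:

    /* Structure introduced in <asm/tlbflush.h> by this patch: */
    struct flush_tlb_info {
            struct mm_struct *mm;
            unsigned long start;
            unsigned long end;
    };

    /* Old convention: each piece of flush state is a separate argument. */
    void native_flush_tlb_others(const struct cpumask *cpumask,
                                 struct mm_struct *mm,
                                 unsigned long start, unsigned long end);

    /* New convention: callers fill in one struct and pass a pointer. */
    void native_flush_tlb_others(const struct cpumask *cpumask,
                                 const struct flush_tlb_info *info);

    /*
     * Typical call site after the change (a full flush of one mm,
     * as in flush_tlb_current_task() below):
     */
    struct flush_tlb_info info = {
            .mm = mm,
            .start = 0UL,
            .end = TLB_FLUSH_ALL,
    };
    flush_tlb_others(mm_cpumask(mm), &info);

Along the same lines, uv_flush_tlb_others() drops its cpu argument and computes the CPU itself via smp_processor_id(), as the tlb_uv.c hunk below shows.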
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
[ Fix build warning. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Eduardo Valentin <eduval@amazon.com>
Signed-off-by: Eduardo Valentin <edubezval@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/paravirt.h       |    6 --
 arch/x86/include/asm/paravirt_types.h |    5 --
 arch/x86/include/asm/tlbflush.h       |   19 +++++---
 arch/x86/include/asm/uv/uv.h          |   11 ++---
 arch/x86/mm/tlb.c                     |   72 ++++++++++++++++++----------
 arch/x86/platform/uv/tlb_uv.c         |   10 +---
 arch/x86/xen/mmu.c                    |   10 ++--
 7 files changed, 68 insertions(+), 65 deletions(-)
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -317,11 +317,9 @@ static inline void __flush_tlb_single(un
 }
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
-				    struct mm_struct *mm,
-				    unsigned long start,
-				    unsigned long end)
+				    const struct flush_tlb_info *info)
 {
-	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
+	PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -51,6 +51,7 @@ struct mm_struct;
 struct desc_struct;
 struct task_struct;
 struct cpumask;
+struct flush_tlb_info;
 
 /*
  * Wrapper type for pointers to code which uses the non-standard
@@ -225,9 +226,7 @@ struct pv_mmu_ops {
 	void (*flush_tlb_kernel)(void);
 	void (*flush_tlb_single)(unsigned long addr);
 	void (*flush_tlb_others)(const struct cpumask *cpus,
-				 struct mm_struct *mm,
-				 unsigned long start,
-				 unsigned long end);
+				 const struct flush_tlb_info *info);
 
 	/* Hooks for allocating and freeing a pagetable top-level */
 	int (*pgd_alloc)(struct mm_struct *mm);
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -211,12 +211,18 @@ static inline void __flush_tlb_one(unsig
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
+ *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
  */
 
+struct flush_tlb_info {
+	struct mm_struct *mm;
+	unsigned long start;
+	unsigned long end;
+};
+
 #ifndef CONFIG_SMP
 
 /* "_up" is for UniProcessor.
@@ -275,9 +281,7 @@ static inline void flush_tlb_mm_range(st
 }
 
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-					   struct mm_struct *mm,
-					   unsigned long start,
-					   unsigned long end)
+					   const struct flush_tlb_info *info)
 {
 }
 
@@ -315,8 +319,7 @@ static inline void flush_tlb_page(struct
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm,
-			     unsigned long start, unsigned long end);
+			     const struct flush_tlb_info *info);
 
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
@@ -338,8 +341,8 @@ extern void arch_tlbbatch_flush(struct a
 #endif	/* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, start, end)	\
-	native_flush_tlb_others(mask, mm, start, end)
+#define flush_tlb_others(mask, info)	\
+	native_flush_tlb_others(mask, info)
 #endif
 
 #endif /* _ASM_X86_TLBFLUSH_H */
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_UV_UV_H
 #define _ASM_X86_UV_UV_H
 
+#include <asm/tlbflush.h>
+
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 
 struct cpumask;
@@ -14,10 +16,7 @@ extern void uv_cpu_init(void);
 extern void uv_nmi_init(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-						 struct mm_struct *mm,
-						 unsigned long start,
-						 unsigned long end,
-						 unsigned int cpu);
+						 const struct flush_tlb_info *info);
 
 #else	/* X86_UV */
 
@@ -26,8 +25,8 @@ static inline int is_uv_system(void)	{ r
 static inline void uv_cpu_init(void)	{ }
 static inline void uv_system_init(void)	{ }
 static inline const struct cpumask *
-uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
-		    unsigned long start, unsigned long end, unsigned int cpu)
+uv_flush_tlb_others(const struct cpumask *cpumask,
+		    const struct flush_tlb_info *info)
 { return cpumask; }
 
 #endif	/* X86_UV */
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,12 +30,6 @@
 #ifdef CONFIG_SMP
 
-struct flush_tlb_info {
-	struct mm_struct *flush_mm;
-	unsigned long flush_start;
-	unsigned long flush_end;
-};
-
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
@@ -229,11 +223,11 @@ void switch_mm_irqs_off(struct mm_struct
  */
 static void flush_tlb_func(void *info)
 {
-	struct flush_tlb_info *f = info;
+	const struct flush_tlb_info *f = info;
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -243,15 +237,15 @@ static void flush_tlb_func(void *info)
 		return;
 	}
 
-	if (f->flush_end == TLB_FLUSH_ALL) {
+	if (f->end == TLB_FLUSH_ALL) {
 		local_flush_tlb();
 		trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
 	} else {
 		unsigned long addr;
 		unsigned long nr_pages =
-			(f->flush_end - f->flush_start) / PAGE_SIZE;
-		addr = f->flush_start;
-		while (addr < f->flush_end) {
+			(f->end - f->start) / PAGE_SIZE;
+		addr = f->start;
+		while (addr < f->end) {
 			__flush_tlb_single(addr);
 			addr += PAGE_SIZE;
 		}
@@ -260,38 +254,38 @@ static void flush_tlb_func(void *info)
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm, unsigned long start,
-			     unsigned long end)
+			     const struct flush_tlb_info *info)
 {
-	struct flush_tlb_info info;
-
-	info.flush_mm = mm;
-	info.flush_start = start;
-	info.flush_end = end;
-
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-	if (end == TLB_FLUSH_ALL)
+	if (info->end == TLB_FLUSH_ALL)
 		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
 	else
 		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
-				(end - start) >> PAGE_SHIFT);
+				(info->end - info->start) >> PAGE_SHIFT);
 
 	if (is_uv_system()) {
 		unsigned int cpu;
 
 		cpu = smp_processor_id();
-		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
+		cpumask = uv_flush_tlb_others(cpumask, info);
 		if (cpumask)
 			smp_call_function_many(cpumask, flush_tlb_func,
-					       &info, 1);
+					       (void *)info, 1);
 		return;
 	}
-	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
+	smp_call_function_many(cpumask, flush_tlb_func,
+			       (void *)info, 1);
 }
 
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
+	struct flush_tlb_info info = {
+		.mm = mm,
+		.start = 0UL,
+		.end = TLB_FLUSH_ALL,
+	};
+
 	preempt_disable();
 
@@ -302,7 +296,7 @@ void flush_tlb_current_task(void)
 
 	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+		flush_tlb_others(mm_cpumask(mm), &info);
 	preempt_enable();
 }
 
@@ -322,6 +316,7 @@ void flush_tlb_mm_range(struct mm_struct
 			unsigned long end, unsigned long vmflag)
 {
 	unsigned long addr;
+	struct flush_tlb_info info;
 	/* do a global flush by default */
 	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
@@ -362,15 +357,20 @@ void flush_tlb_mm_range(struct mm_struct
 	}
 	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
 out:
+	info.mm = mm;
 	if (base_pages_to_flush == TLB_FLUSH_ALL) {
-		start = 0UL;
-		end = TLB_FLUSH_ALL;
+		info.start = 0UL;
+		info.end = TLB_FLUSH_ALL;
+	} else {
+		info.start = start;
+		info.end = end;
 	}
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, start, end);
+		flush_tlb_others(mm_cpumask(mm), &info);
 	preempt_enable();
 }
 
+
 static void do_flush_tlb_all(void *info)
 {
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -391,7 +391,7 @@ static void do_kernel_range_flush(void *
 	unsigned long addr;
 
 	/* flush range by one by one 'invlpg' */
-	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
 		__flush_tlb_single(addr);
 }
 
@@ -404,14 +404,20 @@ void flush_tlb_kernel_range(unsigned lon
 		on_each_cpu(do_flush_tlb_all, NULL, 1);
 	} else {
 		struct flush_tlb_info info;
-		info.flush_start = start;
-		info.flush_end = end;
+		info.start = start;
+		info.end = end;
 		on_each_cpu(do_kernel_range_flush, &info, 1);
 	}
 }
 
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
+	struct flush_tlb_info info = {
+		.mm = NULL,
+		.start = 0UL,
+		.end = TLB_FLUSH_ALL,
+	};
+
 	int cpu = get_cpu();
 
 	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
@@ -421,7 +427,7 @@ void arch_tlbbatch_flush(struct arch_tlb
 	}
 
 	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
-		flush_tlb_others(&batch->cpumask, NULL, 0, TLB_FLUSH_ALL);
+		flush_tlb_others(&batch->cpumask, &info);
 	cpumask_clear(&batch->cpumask);
 
 	put_cpu();
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1110,11 +1110,9 @@ static int set_distrib_bits(struct cpuma
  * done. The returned pointer is valid till preemption is re-enabled.
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-					  struct mm_struct *mm,
-					  unsigned long start,
-					  unsigned long end,
-					  unsigned int cpu)
+					  const struct flush_tlb_info *info)
 {
+	unsigned int cpu = smp_processor_id();
 	int locals = 0;
 	int remotes = 0;
 	int hubs = 0;
@@ -1171,8 +1169,8 @@ const struct cpumask *uv_flush_tlb_other
 
 	record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-	if (!end || (end - start) <= PAGE_SIZE)
-		bau_desc->payload.address = start;
+	if (!info->end || (info->end - info->start) <= PAGE_SIZE)
+		bau_desc->payload.address = info->start;
 	else
 		bau_desc->payload.address = TLB_FLUSH_ALL;
 	bau_desc->payload.sending_cpu = cpu;
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1372,8 +1372,7 @@ static void xen_flush_tlb_single(unsigne
 }
 
 static void xen_flush_tlb_others(const struct cpumask *cpus,
-				 struct mm_struct *mm, unsigned long start,
-				 unsigned long end)
+				 const struct flush_tlb_info *info)
 {
 	struct {
 		struct mmuext_op op;
@@ -1385,7 +1384,7 @@ static void xen_flush_tlb_others(const s
 	} *args;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
+	trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
 
 	if (cpumask_empty(cpus))
 		return;		/* nothing to do */
@@ -1399,9 +1398,10 @@ static void xen_flush_tlb_others(const s
 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
 	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+	if (info->end != TLB_FLUSH_ALL &&
+	    (info->end - info->start) <= PAGE_SIZE) {
 		args->op.cmd = MMUEXT_INVLPG_MULTI;
-		args->op.arg1.linear_addr = start;
+		args->op.arg1.linear_addr = info->start;
 	}
 
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
Patches currently in stable-queue which might be from luto@kernel.org are
queue-4.9/x86-mm-refactor-flush_tlb_mm_range-to-merge-local-and-remote-cases.patch
queue-4.9/x86-mm-pass-flush_tlb_info-to-flush_tlb_others-etc.patch
queue-4.9/x86-mm-rework-lazy-tlb-to-track-the-actual-loaded-mm.patch
queue-4.9/x86-mm-kvm-teach-kvm-s-vmx-code-that-cr3-isn-t-a-constant.patch
queue-4.9/x86-mm-use-new-merged-flush-logic-in-arch_tlbbatch_flush.patch
queue-4.9/x86-kvm-vmx-simplify-segment_base.patch
queue-4.9/x86-entry-unwind-create-stack-frames-for-saved-interrupt-registers.patch
queue-4.9/x86-mm-reduce-indentation-in-flush_tlb_func.patch
queue-4.9/x86-mm-remove-the-up-asm-tlbflush.h-code-always-use-the-formerly-smp-code.patch
queue-4.9/x86-mm-reimplement-flush_tlb_page-using-flush_tlb_mm_range.patch
queue-4.9/mm-x86-mm-make-the-batched-unmap-tlb-flush-api-more-generic.patch
queue-4.9/x86-kvm-vmx-defer-tr-reload-after-vm-exit.patch
queue-4.9/x86-mm-change-the-leave_mm-condition-for-local-tlb-flushes.patch
queue-4.9/x86-mm-be-more-consistent-wrt-page_shift-vs-page_size-in-tlb-flush-code.patch