The patch below does not apply to the 5.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From dfb5c1e12c28e35e4d4e5bc8022b0e9d585b89a7 Mon Sep 17 00:00:00 2001
From: Wei Liu wei.liu@kernel.org Date: Fri, 10 Sep 2021 18:57:14 +0000 Subject: [PATCH] x86/hyperv: remove on-stack cpumask from hv_send_ipi_mask_allbutself
It is not a good practice to allocate a cpumask on stack, given it may consume up to 1 kilobytes of stack space if the kernel is configured to have 8192 cpus.
The internal helper functions __send_ipi_mask{,_ex} need to loop over the provided mask anyway, so it is not too difficult to skip `self' there. We can thus do away with the on-stack cpumask in hv_send_ipi_mask_allbutself.
Adjust call sites of __send_ipi_mask as needed.
Reported-by: Linus Torvalds torvalds@linux-foundation.org Suggested-by: Michael Kelley mikelley@microsoft.com Suggested-by: Linus Torvalds torvalds@linux-foundation.org Fixes: 68bb7bfb7985d ("X86/Hyper-V: Enable IPI enlightenments") Signed-off-by: Wei Liu wei.liu@kernel.org Reviewed-by: Michael Kelley mikelley@microsoft.com Link: https://lore.kernel.org/r/20210910185714.299411-3-wei.liu@kernel.org
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 90e682a92820..32a1ad356c18 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val) /* * IPI implementation on Hyper-V. */ -static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) +static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector, + bool exclude_self) { struct hv_send_ipi_ex **arg; struct hv_send_ipi_ex *ipi_arg; @@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
if (!cpumask_equal(mask, cpu_present_mask)) { ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; - nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); + if (exclude_self) + nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask); + else + nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); } if (nr_bank < 0) goto ipi_mask_ex_done; @@ -138,15 +142,25 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) return hv_result_success(status); }
-static bool __send_ipi_mask(const struct cpumask *mask, int vector) +static bool __send_ipi_mask(const struct cpumask *mask, int vector, + bool exclude_self) { - int cur_cpu, vcpu; + int cur_cpu, vcpu, this_cpu = smp_processor_id(); struct hv_send_ipi ipi_arg; u64 status; + unsigned int weight;
trace_hyperv_send_ipi_mask(mask, vector);
- if (cpumask_empty(mask)) + weight = cpumask_weight(mask); + + /* + * Do nothing if + * 1. the mask is empty + * 2. the mask only contains self when exclude_self is true + */ + if (weight == 0 || + (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask))) return true;
if (!hv_hypercall_pg) @@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) ipi_arg.cpu_mask = 0;
for_each_cpu(cur_cpu, mask) { + if (exclude_self && cur_cpu == this_cpu) + continue; vcpu = hv_cpu_number_to_vp_number(cur_cpu); if (vcpu == VP_INVAL) return false; @@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) return hv_result_success(status);
do_ex_hypercall: - return __send_ipi_mask_ex(mask, vector); + return __send_ipi_mask_ex(mask, vector, exclude_self); }
static bool __send_ipi_one(int cpu, int vector) @@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector) return false;
if (vp >= 64) - return __send_ipi_mask_ex(cpumask_of(cpu), vector); + return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp)); return hv_result_success(status); @@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)
static void hv_send_ipi_mask(const struct cpumask *mask, int vector) { - if (!__send_ipi_mask(mask, vector)) + if (!__send_ipi_mask(mask, vector, false)) orig_apic.send_IPI_mask(mask, vector); }
static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) { - unsigned int this_cpu = smp_processor_id(); - struct cpumask new_mask; - const struct cpumask *local_mask; - - cpumask_copy(&new_mask, mask); - cpumask_clear_cpu(this_cpu, &new_mask); - local_mask = &new_mask; - if (!__send_ipi_mask(local_mask, vector)) + if (!__send_ipi_mask(mask, vector, true)) orig_apic.send_IPI_mask_allbutself(mask, vector); }
@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)
static void hv_send_ipi_all(int vector) { - if (!__send_ipi_mask(cpu_online_mask, vector)) + if (!__send_ipi_mask(cpu_online_mask, vector, false)) orig_apic.send_IPI_all(vector); }
Hi Greg,
On Mon, Sep 20, 2021 at 10:56:55AM +0200, gregkh@linuxfoundation.org wrote:
The patch below does not apply to the 5.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
There is one prerequisite patch for this patch. I can backport both to all relevant stable branches, but I would like to know how you would like to receive backport patches. Several git bundles to stable@? Several two-patch series to stable@? I can also give you several tags / branches to pull from if you like.
Thanks, Wei.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From dfb5c1e12c28e35e4d4e5bc8022b0e9d585b89a7 Mon Sep 17 00:00:00 2001 From: Wei Liu wei.liu@kernel.org Date: Fri, 10 Sep 2021 18:57:14 +0000 Subject: [PATCH] x86/hyperv: remove on-stack cpumask from hv_send_ipi_mask_allbutself
It is not a good practice to allocate a cpumask on stack, given it may consume up to 1 kilobytes of stack space if the kernel is configured to have 8192 cpus.
The internal helper functions __send_ipi_mask{,_ex} need to loop over the provided mask anyway, so it is not too difficult to skip `self' there. We can thus do away with the on-stack cpumask in hv_send_ipi_mask_allbutself.
Adjust call sites of __send_ipi_mask as needed.
Reported-by: Linus Torvalds torvalds@linux-foundation.org Suggested-by: Michael Kelley mikelley@microsoft.com Suggested-by: Linus Torvalds torvalds@linux-foundation.org Fixes: 68bb7bfb7985d ("X86/Hyper-V: Enable IPI enlightenments") Signed-off-by: Wei Liu wei.liu@kernel.org Reviewed-by: Michael Kelley mikelley@microsoft.com Link: https://lore.kernel.org/r/20210910185714.299411-3-wei.liu@kernel.org
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 90e682a92820..32a1ad356c18 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val) /*
 * IPI implementation on Hyper-V.
 */
-static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
+			       bool exclude_self)
{ struct hv_send_ipi_ex **arg; struct hv_send_ipi_ex *ipi_arg; @@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) if (!cpumask_equal(mask, cpu_present_mask)) { ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
-		nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
+		if (exclude_self)
+			nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
+		else
+			nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
	}
	if (nr_bank < 0)
		goto ipi_mask_ex_done;
@@ -138,15 +142,25 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
	return hv_result_success(status);
}

-static bool __send_ipi_mask(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask(const struct cpumask *mask, int vector,
+			    bool exclude_self)
{
-	int cur_cpu, vcpu;
+	int cur_cpu, vcpu, this_cpu = smp_processor_id();
	struct hv_send_ipi ipi_arg;
	u64 status;
+	unsigned int weight;
trace_hyperv_send_ipi_mask(mask, vector);
-	if (cpumask_empty(mask))
+	weight = cpumask_weight(mask);
+
+	/*
+	 * Do nothing if
+	 * 1. the mask is empty
+	 * 2. the mask only contains self when exclude_self is true
+	 */
+	if (weight == 0 ||
+	    (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
		return true;
if (!hv_hypercall_pg) @@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) ipi_arg.cpu_mask = 0; for_each_cpu(cur_cpu, mask) {
+		if (exclude_self && cur_cpu == this_cpu)
+			continue;
		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
		if (vcpu == VP_INVAL)
			return false;
@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) return hv_result_success(status); do_ex_hypercall:
-	return __send_ipi_mask_ex(mask, vector);
+	return __send_ipi_mask_ex(mask, vector, exclude_self);
} static bool __send_ipi_one(int cpu, int vector) @@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector) return false; if (vp >= 64)
-		return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+		return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp)); return hv_result_success(status); @@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector) static void hv_send_ipi_mask(const struct cpumask *mask, int vector) {
-	if (!__send_ipi_mask(mask, vector))
+	if (!__send_ipi_mask(mask, vector, false))
		orig_apic.send_IPI_mask(mask, vector);
} static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) {
-	unsigned int this_cpu = smp_processor_id();
-	struct cpumask new_mask;
-	const struct cpumask *local_mask;
-
-	cpumask_copy(&new_mask, mask);
-	cpumask_clear_cpu(this_cpu, &new_mask);
-	local_mask = &new_mask;
-
-	if (!__send_ipi_mask(local_mask, vector))
+	if (!__send_ipi_mask(mask, vector, true))
		orig_apic.send_IPI_mask_allbutself(mask, vector);
} @@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector) static void hv_send_ipi_all(int vector) {
-	if (!__send_ipi_mask(cpu_online_mask, vector))
+	if (!__send_ipi_mask(cpu_online_mask, vector, false))
		orig_apic.send_IPI_all(vector);
}
On Mon, Sep 20, 2021 at 03:44:47PM +0000, Wei Liu wrote:
Hi Greg,
On Mon, Sep 20, 2021 at 10:56:55AM +0200, gregkh@linuxfoundation.org wrote:
The patch below does not apply to the 5.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
There is one prerequisite patch for this patch. I can backport both to all relevant stable branches, but I would like to know how you would like to receive backport patches. Several git bundles to stable@? Several two-patch series to stable@? I can also give you several tags / branches to pull from if you like.
If they cherry-pick cleanly, just the git ids are all we need.
Otherwise a patch series is great, git bundles are only a last-resort.
thanks,
greg k-h
On Mon, Sep 20, 2021 at 05:55:08PM +0200, Greg KH wrote:
On Mon, Sep 20, 2021 at 03:44:47PM +0000, Wei Liu wrote:
Hi Greg,
On Mon, Sep 20, 2021 at 10:56:55AM +0200, gregkh@linuxfoundation.org wrote:
The patch below does not apply to the 5.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
There is one prerequisite patch for this patch. I can backport both to all relevant stable branches, but I would like to know how you would like to receive backport patches. Several git bundles to stable@? Several two-patch series to stable@? I can also give you several tags / branches to pull from if you like.
If they cherry-pick cleanly, just the git ids are all we need.
For 5.14, they do -- I tried doing that on linux-5.14.y.
Please cherry-pick these two patches in the order listed for 5.14:
7ad9bb9d0f35 asm-generic/hyperv: provide cpumask_to_vpset_noself dfb5c1e12c28 x86/hyperv: remove on-stack cpumask from hv_send_ipi_mask_allbutself
Otherwise a patch series is great, git bundles are only a last-resort.
I will post some patch series for older stable trees.
Thanks, Wei.
thanks,
greg k-h
On Mon, Sep 20, 2021 at 05:55:08PM +0200, Greg KH wrote:
On Mon, Sep 20, 2021 at 03:44:47PM +0000, Wei Liu wrote:
Hi Greg,
On Mon, Sep 20, 2021 at 10:56:55AM +0200, gregkh@linuxfoundation.org wrote:
The patch below does not apply to the 5.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
There is one prerequisite patch for this patch. I can backport both to all relevant stable branches, but I would like to know how you would like to receive backport patches. Several git bundles to stable@? Several two-patch series to stable@? I can also give you several tags / branches to pull from if you like.
If they cherry-pick cleanly, just the git ids are all we need.
Otherwise a patch series is great, git bundles are only a last-resort.
Actually we don't think the problem these patches try to solve will cause issues in practice, so backporting shouldn't be needed at this point. Please drop this patch from your queues.
Thanks, Wei.
thanks,
greg k-h
On Mon, Sep 20, 2021 at 04:09:19PM +0000, Wei Liu wrote:
On Mon, Sep 20, 2021 at 05:55:08PM +0200, Greg KH wrote:
On Mon, Sep 20, 2021 at 03:44:47PM +0000, Wei Liu wrote:
Hi Greg,
On Mon, Sep 20, 2021 at 10:56:55AM +0200, gregkh@linuxfoundation.org wrote:
The patch below does not apply to the 5.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
There is one prerequisite patch for this patch. I can backport both to all relevant stable branches, but I would like to know how you would like to receive backport patches. Several git bundles to stable@? Several two-patch series to stable@? I can also give you several tags / branches to pull from if you like.
If they cherry-pick cleanly, just the git ids are all we need.
Otherwise a patch series is great, git bundles are only a last-resort.
Actually we don't think the problem these patches try to solve will cause issues in practice, so backporting shouldn't be needed at this point. Please drop this patch from your queues.
Ok, all dropped, thanks for looking into it.
greg k-h
linux-stable-mirror@lists.linaro.org