The patch below does not apply to the 5.10-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
To reproduce the conflict and resubmit, you may use the following commands:
git fetch https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ linux-5.10.y
git checkout FETCH_HEAD
git cherry-pick -x 8bdd9ef7e9b1b2a73e394712b72b22055e0e26c3
# <resolve conflicts, build, test, etc.>
git commit -s
git send-email --to '<stable@vger.kernel.org>' --in-reply-to '2024081220-brethren-diagnoses-2569@gregkh' --subject-prefix 'PATCH 5.10.y' HEAD^..
Possible dependencies:
8bdd9ef7e9b1 ("drm/i915/gem: Fix Virtual Memory mapping boundaries calculation")
8e4ee5e87ce6 ("drm/i915: Wrap all access to i915_vma.node.start|size")
3bb6a44251b4 ("drm/i915: Rename ggtt_view as gtt_view")
d976521a995a ("drm/i915: extend i915_vma_pin_iomap()")
d63ddca7c581 ("drm/i915: Update tiled blits selftest")
a0ed9c95cce6 ("drm/i915/gt: Use XY_FAST_COLOR_BLT to clear obj on graphics ver 12+")
fd5803e5eebe ("drm/i915/gt: use engine instance directly for offset")
892bfb8a604d ("drm/i915/fbdev: fixup setting screen_size")
c674c5b9342e ("drm/i915/xehp: CCS should use RCS setup functions")
30b9d1b3ef37 ("drm/i915: add I915_BO_ALLOC_GPU_ONLY")
3312a4ac8a46 ("drm/i915/ttm: require mappable by default")
8fbf28934acf ("drm/i915/ttm: fixup the mock_bo")
db927686e43f ("Merge drm/drm-next into drm-intel-gt-next")
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 8bdd9ef7e9b1b2a73e394712b72b22055e0e26c3 Mon Sep 17 00:00:00 2001
From: Andi Shyti <andi.shyti@linux.intel.com>
Date: Fri, 2 Aug 2024 10:38:50 +0200
Subject: [PATCH] drm/i915/gem: Fix Virtual Memory mapping boundaries
calculation
Calculating the size of the mapped area as the lesser value between the
requested size and the actual size does not take the partial mapping
offset into account. This can cause page-fault accesses beyond the
intended mapping.
Fix the calculation of the starting and ending addresses; the total
size is now derived from the difference between the end and start
addresses.
Additionally, the calculations have been rewritten in a clearer and
more understandable form.
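For illustration, a minimal stand-alone sketch of the old and new
arithmetic, with made-up page counts (the numbers and the printf
harness are assumptions for demonstration; in the kernel the inputs
come from the user's VMA, the partial GTT view and the mmap offset):
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* Made-up example, all values in pages. */
        long vm_start = 100, vm_end = 116;  /* 16-page user mapping */
        long obj_offset = 32;               /* mmap offset into the object */
        long partial_offset = 24;           /* offset of the bound partial view */
        long vma_size = 64;                 /* size of the bound partial view */

        /* Old calculation: obj_offset is ignored, so the mapped range
         * can extend past the end of the user's VMA. */
        long old_start = vm_start + partial_offset;
        long old_size = MIN(vma_size, vm_end - vm_start);
        printf("old: maps [%ld, %ld) while the VMA is [%ld, %ld)\n",
               old_start, old_start + old_size, vm_start, vm_end);

        /* New calculation: derive [start, end) from both offsets, then
         * clamp against the VMA; the size is end - start. */
        long start = vm_start - obj_offset + partial_offset;
        long end = MIN(start + vma_size, vm_end);
        start = MAX(start, vm_start);
        printf("new: maps [%ld, %ld), %ld pages\n", start, end, end - start);
        return 0;
}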
Fixes: c58305af1835 ("drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass")
Reported-by: Jann Horn <jannh@google.com>
Co-developed-by: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: <stable@vger.kernel.org> # v4.9+
Reviewed-by: Jann Horn <jannh@google.com>
Reviewed-by: Jonathan Cavitt <Jonathan.cavitt@intel.com>
[Joonas: Add Requires: tag]
Requires: 60a2066c5005 ("drm/i915/gem: Adjust vma offset for framebuffer mmap offset")
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240802083850.103694-3-andi.…
(cherry picked from commit 97b6784753da06d9d40232328efc5c5367e53417)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index ce10dd259812..cac6d4184506 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
return i915_error_to_vmf_fault(err);
}
+static void set_address_limits(struct vm_area_struct *area,
+ struct i915_vma *vma,
+ unsigned long obj_offset,
+ unsigned long *start_vaddr,
+ unsigned long *end_vaddr)
+{
+ unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+ long start, end; /* memory boundaries */
+
+ /*
+ * Let's move into the ">> PAGE_SHIFT"
+ * domain to be sure not to lose bits
+ */
+ vm_start = area->vm_start >> PAGE_SHIFT;
+ vm_end = area->vm_end >> PAGE_SHIFT;
+ vma_size = vma->size >> PAGE_SHIFT;
+
+ /*
+ * Calculate the memory boundaries by considering the offset
+ * provided by the user during memory mapping and the offset
+ * provided for the partial mapping.
+ */
+ start = vm_start;
+ start -= obj_offset;
+ start += vma->gtt_view.partial.offset;
+ end = start + vma_size;
+
+ start = max_t(long, start, vm_start);
+ end = min_t(long, end, vm_end);
+
+ /* Let's move back into the "<< PAGE_SHIFT" domain */
+ *start_vaddr = (unsigned long)start << PAGE_SHIFT;
+ *end_vaddr = (unsigned long)end << PAGE_SHIFT;
+}
+
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
bool write = area->vm_flags & VM_WRITE;
struct i915_gem_ww_ctx ww;
+ unsigned long obj_offset;
+ unsigned long start, end; /* memory boundaries */
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
+ unsigned long pfn;
int srcu;
int ret;
- /* We don't use vmf->pgoff since that has the fake offset */
+ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+ page_offset += obj_offset;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -402,12 +441,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
if (ret)
goto err_unpin;
+ set_address_limits(area, vma, obj_offset, &start, &end);
+
+ pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+ pfn += (start - area->vm_start) >> PAGE_SHIFT;
+ pfn += obj_offset - vma->gtt_view.partial.offset;
+
/* Finally, remap it using the new GTT offset */
- ret = remap_io_mapping(area,
- area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->iomap);
+ ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
if (ret)
goto err_fence;
The patch below does not apply to the 5.15-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
To reproduce the conflict and resubmit, you may use the following commands:
git fetch https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ linux-5.15.y
git checkout FETCH_HEAD
git cherry-pick -x 8bdd9ef7e9b1b2a73e394712b72b22055e0e26c3
# <resolve conflicts, build, test, etc.>
git commit -s
git send-email --to '<stable@vger.kernel.org>' --in-reply-to '2024081219-scenic-profound-0b37@gregkh' --subject-prefix 'PATCH 5.15.y' HEAD^..
Possible dependencies:
8bdd9ef7e9b1 ("drm/i915/gem: Fix Virtual Memory mapping boundaries calculation")
8e4ee5e87ce6 ("drm/i915: Wrap all access to i915_vma.node.start|size")
3bb6a44251b4 ("drm/i915: Rename ggtt_view as gtt_view")
d976521a995a ("drm/i915: extend i915_vma_pin_iomap()")
d63ddca7c581 ("drm/i915: Update tiled blits selftest")
a0ed9c95cce6 ("drm/i915/gt: Use XY_FAST_COLOR_BLT to clear obj on graphics ver 12+")
fd5803e5eebe ("drm/i915/gt: use engine instance directly for offset")
892bfb8a604d ("drm/i915/fbdev: fixup setting screen_size")
c674c5b9342e ("drm/i915/xehp: CCS should use RCS setup functions")
30b9d1b3ef37 ("drm/i915: add I915_BO_ALLOC_GPU_ONLY")
3312a4ac8a46 ("drm/i915/ttm: require mappable by default")
8fbf28934acf ("drm/i915/ttm: fixup the mock_bo")
db927686e43f ("Merge drm/drm-next into drm-intel-gt-next")
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 8bdd9ef7e9b1b2a73e394712b72b22055e0e26c3 Mon Sep 17 00:00:00 2001
From: Andi Shyti <andi.shyti@linux.intel.com>
Date: Fri, 2 Aug 2024 10:38:50 +0200
Subject: [PATCH] drm/i915/gem: Fix Virtual Memory mapping boundaries
calculation
Calculating the size of the mapped area as the lesser value between the
requested size and the actual size does not take the partial mapping
offset into account. This can cause page-fault accesses beyond the
intended mapping.
Fix the calculation of the starting and ending addresses; the total
size is now derived from the difference between the end and start
addresses.
Additionally, the calculations have been rewritten in a clearer and
more understandable form.
Fixes: c58305af1835 ("drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass")
Reported-by: Jann Horn <jannh@google.com>
Co-developed-by: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: <stable@vger.kernel.org> # v4.9+
Reviewed-by: Jann Horn <jannh@google.com>
Reviewed-by: Jonathan Cavitt <Jonathan.cavitt@intel.com>
[Joonas: Add Requires: tag]
Requires: 60a2066c5005 ("drm/i915/gem: Adjust vma offset for framebuffer mmap offset")
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240802083850.103694-3-andi.…
(cherry picked from commit 97b6784753da06d9d40232328efc5c5367e53417)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index ce10dd259812..cac6d4184506 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
return i915_error_to_vmf_fault(err);
}
+static void set_address_limits(struct vm_area_struct *area,
+ struct i915_vma *vma,
+ unsigned long obj_offset,
+ unsigned long *start_vaddr,
+ unsigned long *end_vaddr)
+{
+ unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+ long start, end; /* memory boundaries */
+
+ /*
+ * Let's move into the ">> PAGE_SHIFT"
+ * domain to be sure not to lose bits
+ */
+ vm_start = area->vm_start >> PAGE_SHIFT;
+ vm_end = area->vm_end >> PAGE_SHIFT;
+ vma_size = vma->size >> PAGE_SHIFT;
+
+ /*
+ * Calculate the memory boundaries by considering the offset
+ * provided by the user during memory mapping and the offset
+ * provided for the partial mapping.
+ */
+ start = vm_start;
+ start -= obj_offset;
+ start += vma->gtt_view.partial.offset;
+ end = start + vma_size;
+
+ start = max_t(long, start, vm_start);
+ end = min_t(long, end, vm_end);
+
+ /* Let's move back into the "<< PAGE_SHIFT" domain */
+ *start_vaddr = (unsigned long)start << PAGE_SHIFT;
+ *end_vaddr = (unsigned long)end << PAGE_SHIFT;
+}
+
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
bool write = area->vm_flags & VM_WRITE;
struct i915_gem_ww_ctx ww;
+ unsigned long obj_offset;
+ unsigned long start, end; /* memory boundaries */
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
+ unsigned long pfn;
int srcu;
int ret;
- /* We don't use vmf->pgoff since that has the fake offset */
+ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+ page_offset += obj_offset;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -402,12 +441,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
if (ret)
goto err_unpin;
+ set_address_limits(area, vma, obj_offset, &start, &end);
+
+ pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+ pfn += (start - area->vm_start) >> PAGE_SHIFT;
+ pfn += obj_offset - vma->gtt_view.partial.offset;
+
/* Finally, remap it using the new GTT offset */
- ret = remap_io_mapping(area,
- area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->iomap);
+ ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
if (ret)
goto err_fence;
From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
[ Upstream commit 0db880fc865ffb522141ced4bfa66c12ab1fbb70 ]
nmi_enter()/nmi_exit() touches per-cpu variables, which can lead to a
kernel crash when invoked during real-mode interrupt handling (e.g. the
early HMI/MCE interrupt handlers) if the percpu allocation comes from
the vmalloc area.
Early HMI/MCE handlers are called through the
DEFINE_INTERRUPT_HANDLER_NMI() wrapper, which invokes
nmi_enter()/nmi_exit(). There is no issue when the percpu allocation
comes from the embedded first chunk; however, with
CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled, the percpu allocation can
come from the vmalloc area.
With the kernel command line "percpu_alloc=page" we can force the
percpu allocation to come from the vmalloc area and observe a kernel
crash in machine_check_early():
[ 1.215714] NIP [c000000000e49eb4] rcu_nmi_enter+0x24/0x110
[ 1.215717] LR [c0000000000461a0] machine_check_early+0xf0/0x2c0
[ 1.215719] --- interrupt: 200
[ 1.215720] [c000000fffd73180] [0000000000000000] 0x0 (unreliable)
[ 1.215722] [c000000fffd731b0] [0000000000000000] 0x0
[ 1.215724] [c000000fffd73210] [c000000000008364] machine_check_early_common+0x134/0x1f8
Fix this by avoiding nmi_enter()/nmi_exit() in real mode if the percpu
first chunk is not embedded.
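For reference, the jump-label pattern the fix builds on, as a minimal
sketch (the two helpers are illustrative wrappers; the key name and the
static_key_*() calls match the diff below):
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);

/* Flipped once at boot, after the page-backed percpu first chunk has
 * been set up. */
static void __init note_percpu_first_chunk_is_paged(void)
{
        static_key_enable(&__percpu_first_chunk_is_paged.key);
}

/* Cheap test on NMI entry/exit: a patched branch, with no per-cpu or
 * vmalloc access on the real-mode path. */
static bool percpu_first_chunk_paged(void)
{
        return static_key_enabled(&__percpu_first_chunk_is_paged.key);
}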
CVE-2024-42126
Cc: stable@vger.kernel.org # 5.15.x
Cc: gregkh@linuxfoundation.org
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Shirisha Ganta <shirisha@linux.ibm.com>
Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240410043006.81577-1-mahesh@linux.ibm.com
[ Conflicts in arch/powerpc/include/asm/interrupt.h
because interrupt_nmi_enter_prepare() and interrupt_nmi_exit_prepare()
have been refactored. ]
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
arch/powerpc/include/asm/interrupt.h | 14 ++++++++++----
arch/powerpc/include/asm/percpu.h | 10 ++++++++++
arch/powerpc/kernel/setup_64.c | 2 ++
3 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index e592e65e7665..49285b147afe 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -285,18 +285,24 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
/*
* Do not use nmi_enter() for pseries hash guest taking a real-mode
* NMI because not everything it touches is within the RMA limit.
+ *
+ * Likewise, do not use it in real mode if percpu first chunk is not
+ * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
+ * are chances where percpu allocation can come from vmalloc area.
*/
- if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+ if ((!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
!firmware_has_feature(FW_FEATURE_LPAR) ||
- radix_enabled() || (mfmsr() & MSR_DR))
+ radix_enabled() || (mfmsr() & MSR_DR)) &&
+ !percpu_first_chunk_is_paged)
nmi_enter();
}
static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
- if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+ if ((!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
!firmware_has_feature(FW_FEATURE_LPAR) ||
- radix_enabled() || (mfmsr() & MSR_DR))
+ radix_enabled() || (mfmsr() & MSR_DR)) &&
+ !percpu_first_chunk_is_paged)
nmi_exit();
/*
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index 8e5b7d0b851c..634970ce13c6 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -15,6 +15,16 @@
#endif /* CONFIG_SMP */
#endif /* __powerpc64__ */
+#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
+#include <linux/jump_label.h>
+DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+
+#define percpu_first_chunk_is_paged \
+ (static_key_enabled(&__percpu_first_chunk_is_paged.key))
+#else
+#define percpu_first_chunk_is_paged false
+#endif /* CONFIG_PPC64 && CONFIG_SMP */
+
#include <asm-generic/percpu.h>
#include <asm/paca.h>
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index eaa79a0996d1..37d5683ab298 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -825,6 +825,7 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
+DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
static void __init pcpu_populate_pte(unsigned long addr)
{
@@ -904,6 +905,7 @@ void __init setup_per_cpu_areas(void)
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
+ static_key_enable(&__percpu_first_chunk_is_paged.key);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
--
2.34.1
From: Long Li <longli@microsoft.com>
After napi_complete_done() is called while NAPI is polling in the
current process context, another NAPI instance may be scheduled, start
running in softirq on another CPU, and ring the doorbell before the
current CPU does. Combined with unnecessary rings when there is no need
to arm the CQ, this triggers error paths in the hardware.
Fix this by calling napi_complete_done() after the doorbell ring. This
also limits the number of unnecessary rings when there is no need to
arm. MANA hardware specifies that there must be one doorbell ring every
8 CQ wraparounds. This driver guarantees one doorbell ring as soon as
the number of consumed CQEs exceeds 4 CQ wraparounds. In practical
workloads, 4 CQ wraparounds prove large enough that the limit is rarely
exceeded before all the NAPI weight is consumed.
To implement this, add a per-CQ counter, cq->work_done_since_doorbell,
and make sure the doorbell is rung as soon as 4 CQ wraparounds have
passed.
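For illustration, the threshold arithmetic as a stand-alone sketch (the
CQ geometry below is assumed, not taken from the driver; in
mana_cq_handler() the values come from cq->gdma_cq->queue_size and
COMP_ENTRY_SIZE):
#include <stdio.h>

int main(void)
{
        /* Assumed example geometry. */
        unsigned int queue_size = 64 * 1024;   /* CQ ring size in bytes */
        unsigned int comp_entry_size = 64;     /* bytes per CQE */

        unsigned int cqes_per_wrap = queue_size / comp_entry_size;
        unsigned int ring_threshold = cqes_per_wrap * 4;

        /* One wraparound is 1024 CQEs here, so the doorbell is rung once
         * more than 4096 CQEs have been consumed since the last ring --
         * safely within the 8-wraparound hardware requirement. */
        printf("wraparound = %u CQEs, ring after > %u CQEs\n",
               cqes_per_wrap, ring_threshold);
        return 0;
}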
Cc: stable@vger.kernel.org
Fixes: e1b5683ff62e ("net: mana: Move NAPI from EQ to CQ")
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Long Li <longli@microsoft.com>
---
change in v2:
Added more details to comments to explain the patch.
change in v3:
Corrected typo. Removed extra empty lines between "Fixes" and "Reviewed-by".
drivers/net/ethernet/microsoft/mana/mana_en.c | 24 ++++++++++++-------
include/net/mana/mana.h | 1 +
2 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index d2f07e179e86..f83211f9e737 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1788,7 +1788,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
struct mana_cq *cq = context;
- u8 arm_bit;
int w;
WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
@@ -1799,16 +1798,23 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
mana_poll_tx_cq(cq);
w = cq->work_done;
-
- if (w < cq->budget &&
- napi_complete_done(&cq->napi, w)) {
- arm_bit = SET_ARM_BIT;
- } else {
- arm_bit = 0;
+ cq->work_done_since_doorbell += w;
+
+ if (w < cq->budget) {
+ mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
+ cq->work_done_since_doorbell = 0;
+ napi_complete_done(&cq->napi, w);
+ } else if (cq->work_done_since_doorbell >
+ cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
+ /* MANA hardware requires at least one doorbell ring every 8
+ * wraparounds of CQ even if there is no need to arm the CQ.
+ * This driver rings the doorbell as soon as we have exceeded
+ * 4 wraparounds.
+ */
+ mana_gd_ring_cq(gdma_queue, 0);
+ cq->work_done_since_doorbell = 0;
}
- mana_gd_ring_cq(gdma_queue, arm_bit);
-
return w;
}
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 6439fd8b437b..7caa334f4888 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -275,6 +275,7 @@ struct mana_cq {
/* NAPI data */
struct napi_struct napi;
int work_done;
+ int work_done_since_doorbell;
int budget;
};
--
2.17.1
In parport_attach(), the return value of ida_alloc() is unchecked,
which leads to the use of an invalid index value.
To address this issue, the index should be checked. When the index is
invalid, the device should be freed.
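For illustration, the unwind ordering the fix restores, as a generic
sketch (not the driver code; the example_* names are hypothetical,
while ida_alloc() behaves as in the kernel and returns the new ID or a
negative errno):
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDA(example_ida);

struct example_device {
        int index;
};

/* Hypothetical follow-up step that can fail, standing in for the
 * driver's device registration. */
static bool example_register(struct example_device *dev)
{
        return dev->index >= 0; /* placeholder: succeeds in this sketch */
}

static int example_attach(void)
{
        struct example_device *dev;
        int index;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        index = ida_alloc(&example_ida, GFP_KERNEL);
        if (index < 0)
                goto err_free_device;   /* no ID held yet: free only dev */
        dev->index = index;

        if (!example_register(dev))
                goto err_free_ida;      /* unwind in reverse order */

        return 0;

err_free_ida:
        ida_free(&example_ida, index);
err_free_device:
        kfree(dev);
        return index < 0 ? index : -ENODEV;
}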
Found by code review, compile tested only.
Cc: stable@vger.kernel.org
Fixes: 55dbc5b5174d ("pps: remove usage of the deprecated ida_simple_xx() API")
Signed-off-by: Ma Ke <make24@iscas.ac.cn>
---
Changes in v2:
- removed error output as suggestions.
---
drivers/pps/clients/pps_parport.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 63d03a0df5cc..abaffb4e1c1c 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -149,6 +149,9 @@ static void parport_attach(struct parport *port)
}
index = ida_alloc(&pps_client_index, GFP_KERNEL);
+ if (index < 0)
+ goto err_free_device;
+
memset(&pps_client_cb, 0, sizeof(pps_client_cb));
pps_client_cb.private = device;
pps_client_cb.irq_func = parport_irq;
@@ -159,7 +162,7 @@ static void parport_attach(struct parport *port)
index);
if (!device->pardev) {
pr_err("couldn't register with %s\n", port->name);
- goto err_free;
+ goto err_free_ida;
}
if (parport_claim_or_block(device->pardev) < 0) {
@@ -187,8 +190,9 @@ static void parport_attach(struct parport *port)
parport_release(device->pardev);
err_unregister_dev:
parport_unregister_device(device->pardev);
-err_free:
+err_free_ida:
ida_free(&pps_client_index, index);
+err_free_device:
kfree(device);
}
--
2.25.1
The patch below does not apply to the 5.15-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
To reproduce the conflict and resubmit, you may use the following commands:
git fetch https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ linux-5.15.y
git checkout FETCH_HEAD
git cherry-pick -x d67c5649c1541dc93f202eeffc6f49220a4ed71d
# <resolve conflicts, build, test, etc.>
git commit -s
git send-email --to '<stable@vger.kernel.org>' --in-reply-to '2024081209-wrongly-zen-35f3@gregkh' --subject-prefix 'PATCH 5.15.y' HEAD^..
Possible dependencies:
d67c5649c154 ("mptcp: fully established after ADD_ADDR echo on MPJ")
b3ea6b272d79 ("mptcp: consolidate initial ack seq generation")
f3589be0c420 ("mptcp: never shrink offered window")
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From d67c5649c1541dc93f202eeffc6f49220a4ed71d Mon Sep 17 00:00:00 2001
From: "Matthieu Baerts (NGI0)" <matttbe(a)kernel.org>
Date: Wed, 31 Jul 2024 13:05:53 +0200
Subject: [PATCH] mptcp: fully established after ADD_ADDR echo on MPJ
Before this patch, receiving an ADD_ADDR echo on the just-connected
MP_JOIN subflow -- initiator side, after the MP_JOIN 3WHS -- was
resulting in an MP_RESET. That's because only ACKs with a DSS or
ADD_ADDRs without the echo bit were allowed.
Not allowing the ADD_ADDR echo after an MP_CAPABLE 3WHS makes sense, as
we are not supposed to send an ADD_ADDR before that point: it requires
being in fully established mode first. For the MP_JOIN 3WHS, that's
different: the ADD_ADDR can be sent on a previous subflow, and the
ADD_ADDR echo can be received on the recently created one. The other
peer will already be fully established, so it is allowed to send that.
We can then relax the conditions here to accept the ADD_ADDR echo for
MPJ subflows.
Fixes: 67b12f792d5e ("mptcp: full fully established support after ADD_ADDR")
Cc: stable@vger.kernel.org
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20240731-upstream-net-20240731-mptcp-endp-subflow-…
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 8a68382a4fe9..ac2f1a54cc43 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -958,7 +958,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
if (subflow->remote_key_valid &&
(((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) ||
- ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo))) {
+ ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) &&
+ (!mp_opt->echo || subflow->mp_join)))) {
/* subflows are fully established as soon as we get any
* additional ack, including ADD_ADDR.
*/
From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
[ Upstream commit 0db880fc865ffb522141ced4bfa66c12ab1fbb70 ]
nmi_enter()/nmi_exit() touches per-cpu variables, which can lead to a
kernel crash when invoked during real-mode interrupt handling (e.g. the
early HMI/MCE interrupt handlers) if the percpu allocation comes from
the vmalloc area.
Early HMI/MCE handlers are called through the
DEFINE_INTERRUPT_HANDLER_NMI() wrapper, which invokes
nmi_enter()/nmi_exit(). There is no issue when the percpu allocation
comes from the embedded first chunk; however, with
CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled, the percpu allocation can
come from the vmalloc area.
With the kernel command line "percpu_alloc=page" we can force the
percpu allocation to come from the vmalloc area and observe a kernel
crash in machine_check_early():
[ 1.215714] NIP [c000000000e49eb4] rcu_nmi_enter+0x24/0x110
[ 1.215717] LR [c0000000000461a0] machine_check_early+0xf0/0x2c0
[ 1.215719] --- interrupt: 200
[ 1.215720] [c000000fffd73180] [0000000000000000] 0x0 (unreliable)
[ 1.215722] [c000000fffd731b0] [0000000000000000] 0x0
[ 1.215724] [c000000fffd73210] [c000000000008364] machine_check_early_common+0x134/0x1f8
Fix this by avoiding nmi_enter()/nmi_exit() in real mode if the percpu
first chunk is not embedded.
CVE-2024-42126
Cc: stable@vger.kernel.org # 5.10.x
Cc: gregkh@linuxfoundation.org
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Shirisha Ganta <shirisha@linux.ibm.com>
Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240410043006.81577-1-mahesh@linux.ibm.com
[ Conflicts in arch/powerpc/kernel/mce.c and arch/powerpc/kernel/traps.c
because machine_check_early() and machine_check_exception()
have been refactored. ]
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
v2:
- Also fix for CONFIG_PPC_BOOK3S_64 not enabled.
- Add Upstream.
- Cc stable(a)vger.kernel.org.
---
arch/powerpc/include/asm/percpu.h | 10 ++++++++++
arch/powerpc/kernel/mce.c | 14 +++++++++++---
arch/powerpc/kernel/setup_64.c | 2 ++
arch/powerpc/kernel/traps.c | 8 +++++++-
4 files changed, 30 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index 8e5b7d0b851c..634970ce13c6 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -15,6 +15,16 @@
#endif /* CONFIG_SMP */
#endif /* __powerpc64__ */
+#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
+#include <linux/jump_label.h>
+DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+
+#define percpu_first_chunk_is_paged \
+ (static_key_enabled(&__percpu_first_chunk_is_paged.key))
+#else
+#define percpu_first_chunk_is_paged false
+#endif /* CONFIG_PPC64 && CONFIG_SMP */
+
#include <asm-generic/percpu.h>
#include <asm/paca.h>
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 63702c0badb9..259343040e1b 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -594,8 +594,15 @@ long notrace machine_check_early(struct pt_regs *regs)
u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
this_cpu_set_ftrace_enabled(0);
- /* Do not use nmi_enter/exit for pseries hpte guest */
- if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
+ /*
+ * Do not use nmi_enter/exit for pseries hpte guest
+ *
+ * Likewise, do not use it in real mode if percpu first chunk is not
+ * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
+ * are chances where percpu allocation can come from vmalloc area.
+ */
+ if ((radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR)) &&
+ !percpu_first_chunk_is_paged)
nmi_enter();
hv_nmi_check_nonrecoverable(regs);
@@ -606,7 +613,8 @@ long notrace machine_check_early(struct pt_regs *regs)
if (ppc_md.machine_check_early)
handled = ppc_md.machine_check_early(regs);
- if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
+ if ((radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR)) &&
+ !percpu_first_chunk_is_paged)
nmi_exit();
this_cpu_set_ftrace_enabled(ftrace_enabled);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 3f8426bccd16..899d87de0165 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -824,6 +824,7 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
+DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
static void __init pcpu_populate_pte(unsigned long addr)
{
@@ -903,6 +904,7 @@ void __init setup_per_cpu_areas(void)
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
+ static_key_enable(&__percpu_first_chunk_is_paged.key);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b0e87dce2b9a..b4d108bef814 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -835,8 +835,14 @@ void machine_check_exception(struct pt_regs *regs)
* This is silly. The BOOK3S_64 should just call a different function
* rather than expecting semantics to magically change. Something
* like 'non_nmi_machine_check_exception()', perhaps?
+ *
+ * Do not use nmi_enter/exit in real mode if percpu first chunk is
+ * not embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled
+ * there are chances where percpu allocation can come from
+ * vmalloc area.
*/
- const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64);
+ const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+ !percpu_first_chunk_is_paged;
if (nmi) nmi_enter();
--
2.34.1
From: Johannes Berg <johannes.berg@intel.com>
commit ce04abc3fcc62cd5640af981ebfd7c4dc3bded28 upstream.
When userspace sets basic rates, it might send us a rates list that is
empty or consists only of invalid values. We currently ignore the
invalid values and may then end up with an empty rates bitmap, which
later results in a warning.
Reject the call if there were no valid rates.
[ Conflict resolution involved adjusting the patch to accommodate
changes in the function signature of ieee80211_parse_bitrates,
specifically the updated first parameter ]
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Reported-by: syzbot+19013115c9786bfd0c4e@syzkaller.appspotmail.com
Tested-by: syzbot+19013115c9786bfd0c4e@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=19013115c9786bfd0c4e
Signed-off-by: Vincenzo Mezzela <vincenzo.mezzela@gmail.com>
---
Hi,
I'm resending the backport for v5.15 [1], following the one I previously
sent for v6.1 [2].
Best regards,
Vincenzo
- [1] https://lore.kernel.org/all/20240727125033.1774143-1-vincenzo.mezzela@gmail…
- [2] https://lore.kernel.org/all/20240729134318.291424-1-vincenzo.mezzela@gmail.…
net/mac80211/cfg.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index f277ce839ddb..85abd3ff07b4 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2339,6 +2339,17 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
if (!sband)
return -EINVAL;
+ if (params->basic_rates) {
+ if (!ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
+ wiphy->bands[sband->band],
+ params->basic_rates,
+ params->basic_rates_len,
+ &sdata->vif.bss_conf.basic_rates))
+ return -EINVAL;
+ changed |= BSS_CHANGED_BASIC_RATES;
+ ieee80211_check_rate_mask(sdata);
+ }
+
if (params->use_cts_prot >= 0) {
sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
changed |= BSS_CHANGED_ERP_CTS_PROT;
@@ -2362,16 +2373,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
changed |= BSS_CHANGED_ERP_SLOT;
}
- if (params->basic_rates) {
- ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
- wiphy->bands[sband->band],
- params->basic_rates,
- params->basic_rates_len,
- &sdata->vif.bss_conf.basic_rates);
- changed |= BSS_CHANGED_BASIC_RATES;
- ieee80211_check_rate_mask(sdata);
- }
-
if (params->ap_isolate >= 0) {
if (params->ap_isolate)
sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
--
2.43.0
Hi Greg, Sasha,
This batch contains backports of recent fixes that are already upstream,
for 4.19.x. The following list shows the backported patches; I am using
the original commit IDs for reference:
1) b53c11664250 ("netfilter: nf_tables: set element extended ACK reporting support")
2) 7395dfacfff6 ("netfilter: nf_tables: use timestamp to check for set element timeout")
3) cff3bd012a95 ("netfilter: nf_tables: prefer nft_chain_validate")
Please, apply,
Thanks
Florian Westphal (1):
netfilter: nf_tables: prefer nft_chain_validate
Pablo Neira Ayuso (2):
netfilter: nf_tables: set element extended ACK reporting support
netfilter: nf_tables: use timestamp to check for set element timeout
include/net/netfilter/nf_tables.h | 21 ++++-
net/netfilter/nf_tables_api.c | 128 ++++++------------------------
net/netfilter/nft_set_hash.c | 8 +-
net/netfilter/nft_set_rbtree.c | 6 +-
4 files changed, 53 insertions(+), 110 deletions(-)
--
2.30.2
Hi Greg, Sasha,
This batch contains backports of recent fixes that are already upstream,
for 5.4.x. The following list shows the backported patches; I am using
the original commit IDs for reference:
1) b53c11664250 ("netfilter: nf_tables: set element extended ACK reporting support")
2) 7395dfacfff6 ("netfilter: nf_tables: use timestamp to check for set element timeout")
3) cff3bd012a95 ("netfilter: nf_tables: prefer nft_chain_validate")
Please, apply,
Thanks
Florian Westphal (1):
netfilter: nf_tables: prefer nft_chain_validate
Pablo Neira Ayuso (2):
netfilter: nf_tables: set element extended ACK reporting support
netfilter: nf_tables: use timestamp to check for set element timeout
include/net/netfilter/nf_tables.h | 21 ++++-
net/netfilter/nf_tables_api.c | 128 ++++++------------------------
net/netfilter/nft_set_hash.c | 8 +-
net/netfilter/nft_set_rbtree.c | 6 +-
4 files changed, 53 insertions(+), 110 deletions(-)
--
2.30.2