On igpu the GGTT looks to be stored inside stolen memory, which is not
treated as normal RAM. The core kernel skips this memory range when
creating the hibernation image, so when coming back from hibernation the
GGTT programming is lost. This seems to break resume, with the GuC FW
failing to load:
[drm] *ERROR* GT0: load failed: status = 0x400000A0, time = 10ms, freq = 1250MHz (req 1300MHz), done = -1
[drm] *ERROR* GT0: load failed: status: Reset = 0, BootROM = 0x50, UKernel = 0x00, MIA = 0x00, Auth = 0x01
[drm] *ERROR* GT0: firmware signature verification failed
[drm] *ERROR* CRITICAL: Xe has declared device 0000:00:02.0 as wedged.
Current GGTT users are kernel internal and tracked as pinned, so it
should be possible to hook into the existing save/restore logic that we
use for dgpu, where the actual evict is skipped but, importantly, the
GGTT programming is restored on resume. This has been confirmed to fix
hibernation on at least ADL and MTL, though likely all igpu platforms
are affected.
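As a rough illustration of the idea (my own sketch, not the driver code;
ggtt_reinsert_bo_sketch() is a hypothetical stand-in), the restore side
conceptually reduces to walking the pinned kernel BOs and re-writing
their GGTT entries, with no backing-store copy needed on igpu:

static void restore_ggtt_programming_sketch(struct xe_device *xe)
{
	struct xe_bo *bo;

	spin_lock(&xe->pinned.lock);
	list_for_each_entry(bo, &xe->pinned.kernel_bo_present, pinned_link) {
		if (!(bo->flags & XE_BO_FLAG_GGTT))
			continue;
		/*
		 * Nothing to copy back from system memory on igpu: the
		 * backing pages in stolen were never evicted. The important
		 * step is re-writing the GGTT PTEs, which were lost along
		 * with the rest of stolen across hibernation.
		 */
		ggtt_reinsert_bo_sketch(bo);	/* hypothetical helper */
	}
	spin_unlock(&xe->pinned.lock);
}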
This also means we have a hole in our testing: the existing s4 tests
only really exercise the driver hooks, and don't go as far as actually
rebooting and restoring from the hibernation image, which powers down
RAM in between (and therefore loses the contents of stolen).
Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/3275
Signed-off-by: Matthew Auld <matthew.auld(a)intel.com>
Cc: Matthew Brost <matthew.brost(a)intel.com>
Cc: <stable(a)vger.kernel.org> # v6.8+
---
drivers/gpu/drm/xe/xe_bo.c | 36 ++++++++++++++------------------
drivers/gpu/drm/xe/xe_bo_evict.c | 6 ------
2 files changed, 16 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index d79d8ef5c7d5..0ae5c8f7bab8 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -950,7 +950,10 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
if (WARN_ON(!xe_bo_is_pinned(bo)))
return -EINVAL;
- if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
+ if (WARN_ON(xe_bo_is_vram(bo)))
+ return -EINVAL;
+
+ if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
return -EINVAL;
if (!mem_type_is_vram(place->mem_type))
@@ -1770,6 +1773,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
int xe_bo_pin(struct xe_bo *bo)
{
+ struct ttm_place *place = &(bo->placements[0]);
struct xe_device *xe = xe_bo_device(bo);
int err;
@@ -1800,7 +1804,6 @@ int xe_bo_pin(struct xe_bo *bo)
*/
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
@@ -1809,13 +1812,12 @@ int xe_bo_pin(struct xe_bo *bo)
vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
}
+ }
- if (mem_type_is_vram(place->mem_type) ||
- bo->flags & XE_BO_FLAG_GGTT) {
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
- }
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ spin_unlock(&xe->pinned.lock);
}
ttm_bo_pin(&bo->ttm);
@@ -1863,24 +1865,18 @@ void xe_bo_unpin_external(struct xe_bo *bo)
void xe_bo_unpin(struct xe_bo *bo)
{
+ struct ttm_place *place = &(bo->placements[0]);
struct xe_device *xe = xe_bo_device(bo);
xe_assert(xe, !bo->ttm.base.import_attach);
xe_assert(xe, xe_bo_is_pinned(bo));
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
-
- if (mem_type_is_vram(place->mem_type) ||
- bo->flags & XE_BO_FLAG_GGTT) {
- spin_lock(&xe->pinned.lock);
- xe_assert(xe, !list_empty(&bo->pinned_link));
- list_del_init(&bo->pinned_link);
- spin_unlock(&xe->pinned.lock);
- }
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ xe_assert(xe, !list_empty(&bo->pinned_link));
+ list_del_init(&bo->pinned_link);
+ spin_unlock(&xe->pinned.lock);
}
-
ttm_bo_unpin(&bo->ttm);
}
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 32043e1e5a86..b01bc20eb90b 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -34,9 +34,6 @@ int xe_bo_evict_all(struct xe_device *xe)
u8 id;
int ret;
- if (!IS_DGFX(xe))
- return 0;
-
/* User memory */
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
struct ttm_resource_manager *man =
@@ -125,9 +122,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
struct xe_bo *bo;
int ret;
- if (!IS_DGFX(xe))
- return 0;
-
spin_lock(&xe->pinned.lock);
for (;;) {
bo = list_first_entry_or_null(&xe->pinned.evicted,
--
2.47.0
On Fri, Nov 1, 2024 at 6:58 AM Sasha Levin <sashal(a)kernel.org> wrote:
>
> This is a note to let you know that I've just added the patch titled
>
> lib: alloc_tag_module_unload must wait for pending kfree_rcu calls
>
> to the 6.11-stable tree which can be found at:
> http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
>
> The filename of the patch is:
> lib-alloc_tag_module_unload-must-wait-for-pending-kf.patch
> and it can be found in the queue-6.11 subdirectory.
Thanks Sasha! Could you please double-check that the prerequisite
patch https://lore.kernel.org/all/20241021171003.2907935-1-surenb@google.com/
was also picked up? I don't see it in the queue-6.11 directory.
Without that patch this one will cause build errors, which is why I sent
them as a patchset.
Thanks,
Suren.
>
> If you, or anyone else, feels it should not be added to the stable tree,
> please let <stable(a)vger.kernel.org> know about it.
>
>
>
> commit 536dfe685ebd28b27ebfbc3d4b9168207b7e28a3
> Author: Florian Westphal <fw(a)strlen.de>
> Date: Mon Oct 7 22:52:24 2024 +0200
>
> lib: alloc_tag_module_unload must wait for pending kfree_rcu calls
>
> [ Upstream commit dc783ba4b9df3fb3e76e968b2cbeb9960069263c ]
>
> Ben Greear reports following splat:
> ------------[ cut here ]------------
> net/netfilter/nf_nat_core.c:1114 module nf_nat func:nf_nat_register_fn has 256 allocated at module unload
> WARNING: CPU: 1 PID: 10421 at lib/alloc_tag.c:168 alloc_tag_module_unload+0x22b/0x3f0
> Modules linked in: nf_nat(-) btrfs ufs qnx4 hfsplus hfs minix vfat msdos fat
> ...
> Hardware name: Default string Default string/SKYBAY, BIOS 5.12 08/04/2020
> RIP: 0010:alloc_tag_module_unload+0x22b/0x3f0
> codetag_unload_module+0x19b/0x2a0
> ? codetag_load_module+0x80/0x80
>
> nf_nat module exit calls kfree_rcu on those addresses, but the free
> operation is likely still pending by the time alloc_tag checks for leaks.
>
> Wait for outstanding kfree_rcu operations to complete before checking
> resolves this warning.
>
> Reproducer:
> unshare -n iptables-nft -t nat -A PREROUTING -p tcp
> grep nf_nat /proc/allocinfo # will list 4 allocations
> rmmod nft_chain_nat
> rmmod nf_nat # will WARN.
>
> [akpm(a)linux-foundation.org: add comment]
> Link: https://lkml.kernel.org/r/20241007205236.11847-1-fw@strlen.de
> Fixes: a473573964e5 ("lib: code tagging module support")
> Signed-off-by: Florian Westphal <fw(a)strlen.de>
> Reported-by: Ben Greear <greearb(a)candelatech.com>
> Closes: https://lore.kernel.org/netdev/bdaaef9d-4364-4171-b82b-bcfc12e207eb@candela…
> Cc: Uladzislau Rezki <urezki(a)gmail.com>
> Cc: Vlastimil Babka <vbabka(a)suse.cz>
> Cc: Suren Baghdasaryan <surenb(a)google.com>
> Cc: Kent Overstreet <kent.overstreet(a)linux.dev>
> Cc: <stable(a)vger.kernel.org>
> Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
> Signed-off-by: Sasha Levin <sashal(a)kernel.org>
>
> diff --git a/lib/codetag.c b/lib/codetag.c
> index afa8a2d4f3173..d1fbbb7c2ec3d 100644
> --- a/lib/codetag.c
> +++ b/lib/codetag.c
> @@ -228,6 +228,9 @@ bool codetag_unload_module(struct module *mod)
> if (!mod)
> return true;
>
> + /* await any module's kfree_rcu() operations to complete */
> + kvfree_rcu_barrier();
> +
> mutex_lock(&codetag_lock);
> list_for_each_entry(cttype, &codetag_types, link) {
> struct codetag_module *found = NULL;
From: MrRurikov <grurikov(a)gmal.com>
After being initialized to NULL at rdma.c:1758, the pointer 'queue' is
passed as the first parameter to 'nvmet_rdma_queue_established' at
rdma.c:1773, as the first parameter to 'nvmet_rdma_queue_disconnect' at
rdma.c:1787, and as the second parameter to 'nvmet_rdma_queue_connect_fail'
at rdma.c:1800, where it is dereferenced.
I understand that the driver expects the RDMA_CM_EVENT_CONNECT_REQUEST
event to arrive first and perform the initialization, but maliciously
prepared hardware could send events in violation of the protocol; nothing
guarantees that the sequence of events will start with
RDMA_CM_EVENT_CONNECT_REQUEST.
Found by Linux Verification Center (linuxtesting.org) with SVACE.
Fixes: e1a2ee249b19 ("nvmet-rdma: Fix use after free in nvmet_rdma_cm_handler()")
Cc: stable(a)vger.kernel.org
Signed-off-by: George Rurikov <g.ryurikov(a)securitycode.ru>
---
drivers/nvme/target/rdma.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1b6264fa5803..becebc95f349 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1770,8 +1770,10 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
ret = nvmet_rdma_queue_connect(cm_id, event);
break;
case RDMA_CM_EVENT_ESTABLISHED:
- nvmet_rdma_queue_established(queue);
- break;
+ if (queue) {
+ nvmet_rdma_queue_established(queue);
+ break;
+ }
case RDMA_CM_EVENT_ADDR_CHANGE:
if (!queue) {
struct nvmet_rdma_port *port = cm_id->context;
@@ -1782,8 +1784,10 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
fallthrough;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- nvmet_rdma_queue_disconnect(queue);
- break;
+ if (queue) {
+ nvmet_rdma_queue_disconnect(queue);
+ break;
+ }
case RDMA_CM_EVENT_DEVICE_REMOVAL:
ret = nvmet_rdma_device_removal(cm_id, queue);
break;
@@ -1793,8 +1797,10 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
fallthrough;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
- nvmet_rdma_queue_connect_fail(cm_id, queue);
- break;
+ if (queue) {
+ nvmet_rdma_queue_connect_fail(cm_id, queue);
+ break;
+ }
default:
pr_err("received unrecognized RDMA CM event %d\n",
event->event);
--
2.34.1
From: Randy MacLeod <Randy.MacLeod(a)windriver.com>
This is my first commit to -stable so I'm going to carefully explain what I've done.
I work on the Yocto Project and I have done some work on the Linux network
stack a long time ago so I'm not quite a complete newbie.
I took the commit found here:
https://lore.kernel.org/stable/20240527185645.658299380@linuxfoundation.org/
and backported as per my commit log:
Based on above commit but simplified since pskb_may_pull_reason()
does not exist until 6.1.
I also trimmed the "Tested by dropwatch" section and the full stack trace
from the original commit log, since those may have changed in 5.10/5.15.
It compiles fine for 5.10 and 5.15, but I have not tested with dropwatch
since the patch just drops short xmit packets for bridging.
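For reference, the backported check is roughly of this shape (my own
sketch of the assumed form, not the exact hunk; 5.10/5.15 use
pskb_may_pull()/kfree_skb() rather than the _reason() variants used
upstream):

static netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... existing locals and setup ... */

	/* Drop packets too short to even carry an Ethernet header. */
	if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) {
		kfree_skb(skb);
		return NETDEV_TX_OK;	/* consumed, nothing to transmit */
	}

	/* ... normal bridge xmit path continues here ... */
	return NETDEV_TX_OK;
}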
Finally, since the patch is much simpler than the original, I've removed the
original patch author's SOB line.
Please let me know if any of this is not what y'all'd like to see.
Randy MacLeod (1):
net: bridge: xmit: make sure we have at least eth header len bytes
net/bridge/br_device.c | 5 +++++
1 file changed, 5 insertions(+)
base-commit: 5a8fa04b2a4de1d52be4a04690dcb52ac7998943
--
2.34.1
When assigning the of_node to the aux bridge, the of_node also needs to
be marked as reused; otherwise resource providers like pinctrl will
report a gpio as already requested by a different device when both
pinconf and gpios properties are present.
Fix that by using the device_set_of_node_from_dev() helper instead.
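For context, here is a simplified sketch of what that helper is assumed
to do (not the in-tree implementation; see drivers/base/core.c for the
real thing): the child simply points at the parent's of_node and is
marked as reusing it, which is what makes providers like pinctrl skip
claiming the parent's pins/gpios again for the child device. No extra
node reference is assumed to be taken, consistent with the of_node_get()
call being dropped in the diff below.

static void set_of_node_from_dev_sketch(struct device *dev,
					const struct device *parent)
{
	of_node_put(dev->of_node);	/* drop any node assigned earlier */
	dev->of_node = parent->of_node;
	dev->of_node_reused = true;	/* resources stay owned by the parent */
}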
Fixes: 6914968a0b52 ("drm/bridge: properly refcount DT nodes in aux bridge drivers")
Cc: stable(a)vger.kernel.org # 6.8
Cc: Dmitry Baryshkov <dmitry.baryshkov(a)linaro.org>
Signed-off-by: Abel Vesa <abel.vesa(a)linaro.org>
---
Changes in v2:
- Re-worded commit to be more explicit of what it fixes, as Johan suggested
- Used device_set_of_node_from_dev() helper, as per Johan's suggestion
- Added Fixes tag and cc'ed stable
- Link to v1: https://lore.kernel.org/r/20241017-drm-aux-bridge-mark-of-node-reused-v1-1-…
---
drivers/gpu/drm/bridge/aux-bridge.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index b29980f95379ec7af873ed6e0fb79a9abb663c7b..295e9d031e2dc86cbfd2a7350718fca181c99487 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -58,9 +58,10 @@ int drm_aux_bridge_register(struct device *parent)
adev->id = ret;
adev->name = "aux_bridge";
adev->dev.parent = parent;
- adev->dev.of_node = of_node_get(parent->of_node);
adev->dev.release = drm_aux_bridge_release;
+ device_set_of_node_from_dev(&adev->dev, parent);
+
ret = auxiliary_device_init(adev);
if (ret) {
ida_free(&drm_aux_bridge_ida, adev->id);
---
base-commit: d61a00525464bfc5fe92c6ad713350988e492b88
change-id: 20241017-drm-aux-bridge-mark-of-node-reused-5c2ee740ff19
Best regards,
--
Abel Vesa <abel.vesa(a)linaro.org>
The unsigned variable `len` (of type size_t) is cast to the signed type
`loff_t` when passed to check_add_overflow(). That function checks the
sum against the type of the destination, which is loff_t (signed), so a
large `len` that becomes negative after the cast can let an overflowing
sum go undetected. This issue is similar to the one described in the
link below.
Remove the cast.
Note that even if check_add_overflow() is bypassed by setting `len` to a
value greater than LONG_MAX (which becomes negative after the cast),
copy_from_user(), invoked a few lines later, will not copy anything and
will return `len` (since len > INT_MAX), causing qat_vf_resume_write()
to fail with -EFAULT.
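To make the signedness point concrete, here is a small userspace
illustration of my own (not kernel code) using __builtin_add_overflow(),
which check_add_overflow() wraps; it assumes a 64-bit size_t/loff_t and
GCC/Clang's wrap-around behaviour for the out-of-range cast:

#include <stdio.h>
#include <limits.h>
#include <stddef.h>

int main(void)
{
	size_t len = (size_t)LLONG_MAX + 2;	/* huge but valid size_t */
	long long offs = 0;			/* stands in for loff_t */
	long long end;
	int ovf;

	/* Unsigned len: the exact sum cannot fit in the signed destination. */
	ovf = __builtin_add_overflow(len, offs, &end);
	printf("no cast:   overflow=%d\n", ovf);		/* overflow=1 */

	/* Cast to signed: len wraps to a negative value, the check passes. */
	ovf = __builtin_add_overflow((long long)len, offs, &end);
	printf("with cast: overflow=%d end=%lld\n", ovf, end);	/* overflow=0 */

	return 0;
}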
Fixes: bb208810b1ab ("vfio/qat: Add vfio_pci driver for Intel QAT SR-IOV VF devices")
CC: stable(a)vger.kernel.org # 6.10+
Link: https://lore.kernel.org/all/138bd2e2-ede8-4bcc-aa7b-f3d9de167a37@moroto.mou…
Reported-by: Zijie Zhao <zzjas98(a)gmail.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu(a)intel.com>
Reviewed-by: Xin Zeng <xin.zeng(a)intel.com>
---
drivers/vfio/pci/qat/main.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c
index e36740a282e7..1e3563fe7cab 100644
--- a/drivers/vfio/pci/qat/main.c
+++ b/drivers/vfio/pci/qat/main.c
@@ -305,7 +305,7 @@ static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf,
offs = &filp->f_pos;
if (*offs < 0 ||
- check_add_overflow((loff_t)len, *offs, &end))
+ check_add_overflow(len, *offs, &end))
return -EOVERFLOW;
if (end > mig_dev->state_size)
--
2.47.0