xhci-hub.c tracks suspended ports in a suspended_ports bitfield.
This is checked when responding to a Get_Status(PORT) request, to see
whether a port now running in the U0 state was recently resumed; in
those cases the required USB_PORT_STAT_C_SUSPEND change bit is added to
the response.
The suspended_ports bit was left uncleared if a device was disconnected
during suspend. The bit remained set even when a new device was
connected and enumerated, resulting in an incorrect Get_Status(PORT)
response with a bogus USB_PORT_STAT_C_SUSPEND change bit set once the
new device reached the U0 link state.
The USB_PORT_STAT_C_SUSPEND change bit is only used for USB2 ports, but
xhci-hub keeps track of both USB2 and USB3 suspended ports.
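For illustration, a minimal sketch of the status-reply logic involved
(simplified control flow; the real logic lives in
xhci_get_usb2_port_status(), touched by the diff below):

/*
 * Simplified sketch, not the literal xhci-hub.c code: when the link is
 * back in U0, a set suspended_ports bit is reported as a suspend
 * change. A stale bit left behind by a disconnect during suspend
 * therefore yields a bogus USB_PORT_STAT_C_SUSPEND for whatever device
 * enumerates on that port next.
 */
if (bus_state->suspended_ports & (1 << portnum) &&
    link_state == XDEV_U0) {
	*status |= USB_PORT_STAT_C_SUSPEND << 16;
	bus_state->suspended_ports &= ~(1 << portnum);
}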
Cc: stable(a)vger.kernel.org
Reported-by: Wesley Cheng <quic_wcheng(a)quicinc.com>
Closes: https://lore.kernel.org/linux-usb/d68aa806-b26a-0e43-42fb-b8067325e967@quic…
Fixes: 1d5810b6923c ("xhci: Rework port suspend structures for limited ports.")
Tested-by: Wesley Cheng <quic_wcheng(a)quicinc.com>
Signed-off-by: Mathias Nyman <mathias.nyman(a)linux.intel.com>
---
drivers/usb/host/xhci-hub.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0054d02239e2..0df5d807a77e 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1062,19 +1062,19 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status,
 		*status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
 	/* USB3 specific wPortStatus bits */
-	if (portsc & PORT_POWER) {
+	if (portsc & PORT_POWER)
 		*status |= USB_SS_PORT_STAT_POWER;
-		/* link state handling */
-		if (link_state == XDEV_U0)
-			bus_state->suspended_ports &= ~(1 << portnum);
-	}
-	/* remote wake resume signaling complete */
-	if (bus_state->port_remote_wakeup & (1 << portnum) &&
+	/* no longer suspended or resuming */
+	if (link_state != XDEV_U3 &&
 	    link_state != XDEV_RESUME &&
 	    link_state != XDEV_RECOVERY) {
-		bus_state->port_remote_wakeup &= ~(1 << portnum);
-		usb_hcd_end_port_resume(&hcd->self, portnum);
+		/* remote wake resume signaling complete */
+		if (bus_state->port_remote_wakeup & (1 << portnum)) {
+			bus_state->port_remote_wakeup &= ~(1 << portnum);
+			usb_hcd_end_port_resume(&hcd->self, portnum);
+		}
+		bus_state->suspended_ports &= ~(1 << portnum);
 	}
 	xhci_hub_report_usb3_link_state(xhci, status, portsc);
@@ -1131,6 +1131,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
 			usb_hcd_end_port_resume(&port->rhub->hcd->self, portnum);
 		}
 		port->rexit_active = 0;
+		bus_state->suspended_ports &= ~(1 << portnum);
 	}
 }
--
2.25.1
From: Wesley Cheng <quic_wcheng(a)quicinc.com>
As mentioned in:
commit 474ed23a6257 ("xhci: align the last trb before link if it is
easily splittable.")
A bounce buffer is utilized to ensure that transfers spanning ring
segments are aligned to the EP's max packet size. However, the DMA
buffer is currently mapped against the xHCI HCD device, which does not
carry any DMA operations in certain configurations.
Migration to the sysdev entry was introduced for DWC3 based
implementations, where the IOMMU operations are present on sysdev.
Replace the reference to the controller device with sysdev instead.
This allows the bounce buffer to be properly mapped on any
implementation that has an IOMMU involved.
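A hedged sketch of the distinction (the mapping call below mirrors the
bounce-buffer setup in xhci_align_td(); the size and direction shown
are illustrative):

/*
 * Sketch: on DWC3-style platforms the xHCI HCD is a glue child device
 * with no DMA configuration of its own. bus->sysdev points at the
 * parent device the IOMMU is attached to, so that is the device the
 * bounce buffer must be mapped against.
 */
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;	/* was .controller */

seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
				 max_pkt, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, seg->bounce_dma))
	return 0;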
cc: <stable(a)vger.kernel.org>
Fixes: 4c39d4b949d3 ("usb: xhci: use bus->sysdev for DMA configuration")
Signed-off-by: Wesley Cheng <quic_wcheng(a)quicinc.com>
Signed-off-by: Mathias Nyman <mathias.nyman(a)linux.intel.com>
---
drivers/usb/host/xhci-ring.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1dde53f6eb31..98389b568633 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -798,7 +798,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
 					struct xhci_ring *ring, struct xhci_td *td)
 {
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	struct xhci_segment *seg = td->bounce_seg;
 	struct urb *urb = td->urb;
 	size_t len;
@@ -3469,7 +3469,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 			 u32 *trb_buff_len, struct xhci_segment *seg)
 {
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	unsigned int unalign;
 	unsigned int max_pkt;
 	u32 new_buff_len;
--
2.25.1
From: Anthony Krowiak <akrowiak(a)linux.ibm.com>
In the vfio_ap_irq_enable function, after the page containing the
notification indicator byte (NIB) is pinned, the function attempts
to register the guest ISC. If registration fails, the function sets the
status response code and returns without unpinning the page containing
the NIB. In order to avoid a memory leak, the NIB should be unpinned before
returning from the vfio_ap_irq_enable function.
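A minimal sketch of the corrected error path, assuming the variables in
scope in vfio_ap_irq_enable() as shown in the diff below:

/*
 * Sketch of the fixed flow: if guest ISC registration fails, unpin the
 * NIB page that was pinned just above before returning the error
 * status, so the pin is no longer leaked.
 */
nisc = kvm_s390_gisc_register(kvm, isc);
if (nisc < 0) {
	VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
			 __func__, nisc, isc, q->apqn);
	vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
	status.response_code = AP_RESPONSE_INVALID_GISA;
	return status;
}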
Fixes: 783f0a3ccd79 ("s390/vfio-ap: add s390dbf logging to the vfio_ap_irq_enable function")
Signed-off-by: Janosch Frank <frankja(a)linux.ibm.com>
Signed-off-by: Anthony Krowiak <akrowiak(a)linux.ibm.com>
Cc: <stable(a)vger.kernel.org>
---
drivers/s390/crypto/vfio_ap_ops.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 4db538a55192..9cb28978c186 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -457,6 +457,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
 		VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
 				 __func__, nisc, isc, q->apqn);
+		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
 		status.response_code = AP_RESPONSE_INVALID_GISA;
 		return status;
 	}
--
2.41.0
On 09/14, Felix Kuehling wrote:
> On 2023-09-14 10:02, Christian König wrote:
Do we still need to use a legacy flush to emulate a heavyweight flush
if we don't use SVM? And can I push this now?
Regards,
Lang
> >
> > Am 14.09.23 um 15:59 schrieb Felix Kuehling:
> > >
> > > On 2023-09-14 9:39, Christian König wrote:
> > > > Is a single legacy flush sufficient to emulate a heavyweight
> > > > flush as well?
> > > >
> > > > On previous generations we needed to issue at least two legacy
> > > > flushes for this.
> > > I assume you are referring to the Vega20 XGMI workaround. That is a
> > > very different issue. Because PTEs would be cached in L2, we had to
> > > always use a heavy-weight flush that would also flush the L2 cache
> > > as well, and follow that with another legacy flush to deal with race
> > > conditions where stale PTEs could be re-fetched from L2 before the
> > > L2 flush was complete.
> >
> > No, we also have another (badly documented) workaround which issues a
> > legacy flush before each heavy-weight flush on some hw generations. See
> > my TLB flush cleanup patches.
> >
> > >
> > > A heavy-weight flush guarantees that there are no more possible
> > > memory accesses using the old PTEs. With physically addressed caches
> > > on GFXv9 that includes a cache flush because the address translation
> > > happened before putting data into the cache. I think the address
> > > translation and cache architecture works differently on GFXv10. So
> > > maybe the cache-flush is not required here.
> > >
> > > But even then a legacy flush probably allows for in-flight memory
> > > accesses with old physical addresses to complete after the TLB
> > > flush. So there is a small risk of memory corruption that was
> > > assumed to not be accessed by the GPU any more. Or when using IOMMU
> > > device isolation it would result in IOMMU faults if the DMA mappings
> > > are invalidated slightly too early.
> >
> > Mhm, that's quite bad. Any idea how to avoid that?
>
> A few ideas
>
> * Add an arbitrary delay and hope that it is longer than the FIFOs in
> the HW
> * Execute an atomic operation to memory on some GPU engine that could
> act as a fence, maybe just a RELEASE_MEM on the CP to some writeback
> location would do the job
> * If needed, RELEASE_MEM could also perform a cache flush
>
> Regards,
> Felix
>
>
> >
> > Regards,
> > Christian.
> >
> > >
> > > Regards,
> > > Felix
> > >
> > >
> > > >
> > > > And please don't push before getting an rb from Felix as well.
> > > >
> > > > Regards,
> > > > Christian.
> > > >
> > > >
> > > > Am 14.09.23 um 11:23 schrieb Lang Yu:
> > > > > cyan_skilfish has problems with other flush types.
> > > > >
> > > > > v2: fix incorrect ternary conditional operator usage. (Yifan)
> > > > >
> > > > > Signed-off-by: Lang Yu <Lang.Yu(a)amd.com>
> > > > > Cc: <stable(a)vger.kernel.org> # v5.15+
> > > > > ---
> > > > > drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 7 ++++++-
> > > > > 1 file changed, 6 insertions(+), 1 deletion(-)
> > > > >
> > > > > diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> > > > > index d3da13f4c80e..c6d11047169a 100644
> > > > > --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> > > > > +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> > > > > @@ -236,7 +236,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
> > > > >  {
> > > > >  	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
> > > > >  	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
> > > > > -	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
> > > > > +	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid,
> > > > > +			(adev->asic_type != CHIP_CYAN_SKILLFISH) ? flush_type : 0);
> > > > >  	u32 tmp;
> > > > >  	/* Use register 17 for GART */
> > > > >  	const unsigned int eng = 17;
> > > > > @@ -331,6 +332,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> > > > >  	int r;
> > > > > +	flush_type = (adev->asic_type != CHIP_CYAN_SKILLFISH) ? flush_type : 0;
> > > > > +
> > > > >  	/* flush hdp cache */
> > > > >  	adev->hdp.funcs->flush_hdp(adev, NULL);
> > > > > @@ -426,6 +429,8 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
> > > > >  	struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
> > > > >  	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
> > > > > +	flush_type = (adev->asic_type != CHIP_CYAN_SKILLFISH) ? flush_type : 0;
> > > > > +
> > > > >  	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
> > > > >  		spin_lock(&adev->gfx.kiq[0].ring_lock);
> > > > >  		/* 2 dwords flush + 8 dwords fence */
> > > >
> >
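For readers following the thread: the change under discussion boils down
to clamping the requested flush type to the legacy type on cyan_skilfish.
A hedged sketch of that pattern as a standalone helper (hypothetical
name, mirroring the ternaries in the patch above):

/*
 * Hypothetical helper, not part of the patch itself: cyan_skilfish only
 * handles legacy (type 0) TLB flushes reliably, so downgrade any
 * requested flush type to 0 on that ASIC and pass it through unchanged
 * everywhere else.
 */
static uint32_t gmc_v10_0_effective_flush_type(struct amdgpu_device *adev,
					       uint32_t flush_type)
{
	return (adev->asic_type != CHIP_CYAN_SKILLFISH) ? flush_type : 0;
}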
The quilt patch titled
Subject: kasan: fix access invalid shadow address when input is illegal
has been removed from the -mm tree. Its filename was
kasan-fix-access-invalid-shadow-address-when-input-is-illegal.patch
This patch was dropped because an updated version will be merged
------------------------------------------------------
From: Haibo Li <haibo.li(a)mediatek.com>
Subject: kasan: fix access invalid shadow address when input is illegal
Date: Thu, 14 Sep 2023 16:08:33 +0800
When the input address is illegal, the corresponding shadow address from
kasan_mem_to_shadow() may have no mapping in the MMU table. Accessing
such a shadow address causes a kernel oops. Here is a sample oops on
arm64 (VA 39-bit) with KASAN_SW_TAGS on:
[ffffffb80aaaaaaa] pgd=000000005d3ce003, p4d=000000005d3ce003,
pud=000000005d3ce003, pmd=0000000000000000
Internal error: Oops: 0000000096000006 [#1] PREEMPT SMP
Modules linked in:
CPU: 3 PID: 100 Comm: sh Not tainted 6.6.0-rc1-dirty #43
Hardware name: linux,dummy-virt (DT)
pstate: 80000005 (Nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
pc : __hwasan_load8_noabort+0x5c/0x90
lr : do_ib_ob+0xf4/0x110
ffffffb80aaaaaaa is the shadow address for efffff80aaaaaaaa.
The problem is reading an invalid shadow address in kasan_check_range().
Generic KASAN also hits a similar oops.
To fix it, check the shadow address by reading it with no fault.
After this patch, KASAN is able to report the invalid memory access
for this case.
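For context, generic KASAN derives the shadow address purely by
arithmetic, so any input, valid or not, yields some shadow pointer; a
sketch of the well-known mapping (one shadow byte per 8 bytes of
memory), assuming the usual scale/offset constants:

/* Sketch of the generic KASAN shadow translation: every address maps
 * to a shadow byte, but nothing guarantees the page holding that
 * shadow byte is actually mapped for a bogus input. */
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}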
Link: https://lkml.kernel.org/r/20230914080833.50026-1-haibo.li@mediatek.com
Signed-off-by: Haibo Li <haibo.li(a)mediatek.com>
Cc: Alexander Potapenko <glider(a)google.com>
Cc: Andrey Konovalov <andreyknvl(a)gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a(a)gmail.com>
Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno(a)collabora.com>
Cc: Dmitry Vyukov <dvyukov(a)google.com>
Cc: Matthias Brugger <matthias.bgg(a)gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino(a)arm.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/kasan/kasan.h | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
--- a/mm/kasan/kasan.h~kasan-fix-access-invalid-shadow-address-when-input-is-illegal
+++ a/mm/kasan/kasan.h
@@ -304,8 +304,17 @@ static __always_inline bool addr_has_metadata(const void *addr)
 #ifdef __HAVE_ARCH_SHADOW_MAP
 	return (kasan_mem_to_shadow((void *)addr) != NULL);
 #else
-	return (kasan_reset_tag(addr) >=
-		kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
+	u8 *shadow, shadow_val;
+
+	if (kasan_reset_tag(addr) <
+	    kasan_shadow_to_mem((void *)KASAN_SHADOW_START))
+		return false;
+	/* use read with nofault to check whether the shadow is accessible */
+	shadow = kasan_mem_to_shadow((void *)addr);
+	__get_kernel_nofault(&shadow_val, shadow, u8, fault);
+	return true;
+fault:
+	return false;
 #endif
 }
_
Patches currently in -mm which might be from haibo.li(a)mediatek.com are