This reverts commit d6e020819612a4a06207af858e0978be4d3e3140.
The IS_DGFX check was put in place because error capture of buffer
objects is expected to be broken on devices with VRAM.
We seem to have already submitted the userspace fix to remove that
flag, so let's just rely on that for DG1.
Cc: stable(a)vger.kernel.org # v6.0+
Cc: Ville Syrjälä <ville.syrjala(a)linux.intel.com>
Cc: Andi Shyti <andi.shyti(a)linux.intel.com>
Cc: Matthew Auld <matthew.auld(a)intel.com>
Cc: Thomas Hellström <thomas.hellstrom(a)linux.intel.com>
Cc: Tvrtko Ursulin <tursulin(a)ursulin.net>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen(a)linux.intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7d44aadcd5a5..02c59808cbe4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2013,7 +2013,7 @@ static int eb_capture_stage(struct i915_execbuffer *eb)
continue;
if (i915_gem_context_is_recoverable(eb->gem_context) &&
- GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 10))
+ (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0)))
return -EINVAL;
for_each_batch_create_order(eb, j) {
--
2.49.0
From: Bartosz Golaszewski <bartosz.golaszewski(a)linaro.org>
Commit 3d05fc82237a ("Bluetooth: qca: set power_ctrl_enabled on NULL
returned by gpiod_get_optional()") accidentally changed the previous
behavior, where power control would be disabled without the BT_EN GPIO
only on QCA_WCN6750 and QCA_WCN6855, and also got the error check
wrong. We should treat every IS_ERR() return value from
devm_gpiod_get_optional() as a reason to bail out, while we should only
set power_ctrl_enabled to false on the two models mentioned above. While
at it: use dev_err_probe() to save a LOC.
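For reference, a minimal sketch of the intended semantics (hypothetical
helper, not part of hci_qca.c; assumes the soc_type enum from btqca.h):
devm_gpiod_get_optional() returns an ERR_PTR() on a real failure, which
must abort probe, and NULL when the GPIO is simply not described, which
is only acceptable on WCN6750/WCN6855.

  #include <linux/device.h>
  #include <linux/err.h>
  #include <linux/gpio/consumer.h>

  /* Hypothetical helper illustrating the intended flow. */
  static int qca_get_bt_en(struct device *dev, enum qca_btsoc_type soc_type,
  			 bool *power_ctrl_enabled)
  {
  	struct gpio_desc *bt_en;

  	bt_en = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
  	if (IS_ERR(bt_en))	/* any real error: bail out of probe */
  		return dev_err_probe(dev, PTR_ERR(bt_en),
  				     "failed to acquire BT_EN gpio\n");

  	/* A missing GPIO (NULL) only disables power control on these SoCs. */
  	if (!bt_en &&
  	    (soc_type == QCA_WCN6750 || soc_type == QCA_WCN6855))
  		*power_ctrl_enabled = false;

  	return 0;
  }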
Cc: stable(a)vger.kernel.org
Fixes: 3d05fc82237a ("Bluetooth: qca: set power_ctrl_enabled on NULL returned by gpiod_get_optional()")
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski(a)linaro.org>
---
drivers/bluetooth/hci_qca.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index e00590ba24fdb..a2dc39c005f4f 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -2415,14 +2415,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en) &&
- (data->soc_type == QCA_WCN6750 ||
- data->soc_type == QCA_WCN6855)) {
- dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- return PTR_ERR(qcadev->bt_en);
- }
+ if (IS_ERR(qcadev->bt_en))
+ return dev_err_probe(&serdev->dev,
+ PTR_ERR(qcadev->bt_en),
+ "failed to acquire BT_EN gpio\n");
- if (!qcadev->bt_en)
+ if (!qcadev->bt_en &&
+ (data->soc_type == QCA_WCN6750 ||
+ data->soc_type == QCA_WCN6855))
power_ctrl_enabled = false;
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
--
2.48.1
Use the common wrappers operating directly on struct sg_table objects to
fix incorrect use of scatterlist-related calls. The dma_unmap_sg()
function has to be called with the number of elements originally passed
to dma_map_sg(), not the count returned in the sg_table's nents field.
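For context, a minimal sketch (independent of the zynq driver) of the
sgtable-based wrappers the patch switches to: dma_map_sgtable() returns
0 or a negative error and records the mapped count in sgt->nents, while
dma_unmap_sgtable() internally unmaps sgt->orig_nents entries, so the
original/mapped element-count mismatch cannot happen.

  #include <linux/dma-mapping.h>
  #include <linux/scatterlist.h>

  /* Sketch only: dev and sgt are assumed to come from the caller. */
  static int sgtable_map_example(struct device *dev, struct sg_table *sgt)
  {
  	int ret;

  	/* Returns 0 on success; the mapped count is stored in sgt->nents. */
  	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
  	if (ret)
  		return ret;

  	/* ... program the hardware using sgt->sgl / sgt->nents here ... */

  	/* Unmapping always uses sgt->orig_nents internally. */
  	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
  	return 0;
  }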
CC: stable(a)vger.kernel.org
Fixes: 425902f5c8e3 ("fpga zynq: Use the scatterlist interface")
Signed-off-by: Marek Szyprowski <m.szyprowski(a)samsung.com>
---
drivers/fpga/zynq-fpga.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index f7e08f7ea9ef..9bd39d1d4048 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -406,7 +406,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
}
priv->dma_nelms =
- dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+	    dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
if (priv->dma_nelms == 0) {
dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
return -ENOMEM;
@@ -478,7 +478,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
clk_disable(priv->clk);
out_free:
- dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+	dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
return err;
}
--
2.34.1
The patch fixes a deadlock which can be triggered by an internal
syzkaller reproducer [1] and captured by a bpftrace script [2] and its
log [3] in the following scenario:
Process 1                                   Process 2
---                                         ---
hugetlb_fault
  mutex_lock(B)                // take B
  filemap_lock_hugetlb_folio
    filemap_lock_folio
      __filemap_get_folio
        folio_lock(A)          // take A
  hugetlb_wp
    mutex_unlock(B)            // release B
    ...                                     hugetlb_fault
    ...                                       mutex_lock(B)        // take B
    ...                                       filemap_lock_hugetlb_folio
    ...                                         filemap_lock_folio
    ...                                           __filemap_get_folio
    ...                                             folio_lock(A)  // blocked
    unmap_ref_private
    ...
    mutex_lock(B)              // retake and blocked
This is an ABBA deadlock involving two locks:
- Lock A: pagecache_folio lock
- Lock B: hugetlb_fault_mutex_table lock
The deadlock occurs between two processes as follows:
1. The first process (let’s call it Process 1) is handling a
copy-on-write (COW) operation on a hugepage via hugetlb_wp. Due to
insufficient reserved hugetlb pages, Process 1, owner of the reserved
hugetlb page, attempts to unmap a hugepage owned by another process
(non-owner) to satisfy the reservation. Before unmapping, Process 1
acquires lock B (hugetlb_fault_mutex_table lock) and then lock A
(pagecache_folio lock). To proceed with the unmap, it releases Lock B
but retains Lock A. After the unmap, Process 1 tries to reacquire Lock
B. However, at this point, Lock B has already been acquired by another
process.
2. The second process (Process 2) enters the hugetlb_fault handler
during the unmap operation. It successfully acquires Lock B
(hugetlb_fault_mutex_table lock) that was just released by Process 1,
but then attempts to acquire Lock A (pagecache_folio lock), which is
still held by Process 1.
As a result, Process 1 (holding Lock A) is blocked waiting for Lock B
(held by Process 2), while Process 2 (holding Lock B) is blocked waiting
for Lock A (held by Process 1), forming an ABBA deadlock.
The solution here is to unlock the pagecache_folio in hugetlb_wp() before
dropping the fault mutex, and to report this through the
pagecache_folio_unlocked variable so the caller knows whether the folio
still needs to be unlocked afterwards.
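In ordering terms, a minimal sketch of the fix (hypothetical helper name,
not the actual mm/hugetlb.c code paths): lock A is dropped, and the caller
is told about it, before lock B is released and re-taken around the unmap,
so no task ever sleeps on B while still holding A.

  #include <linux/mm.h>
  #include <linux/mutex.h>
  #include <linux/pagemap.h>

  /* Hypothetical helper; the real logic lives in hugetlb_wp()/hugetlb_fault(). */
  static vm_fault_t wp_unmap_retry(struct folio *pagecache_folio,  /* lock A */
  				 struct mutex *fault_mutex,        /* lock B */
  				 bool *pagecache_folio_unlocked)
  {
  	/*
  	 * Old order: keep A locked, drop B, unmap, re-take B -> can form an
  	 * ABBA cycle with a second fault that holds B and waits for A.
  	 * New order: drop A first and tell the caller we did so.
  	 */
  	if (pagecache_folio) {
  		folio_unlock(pagecache_folio);		/* release A */
  		if (pagecache_folio_unlocked)
  			*pagecache_folio_unlocked = true;
  	}

  	mutex_unlock(fault_mutex);			/* release B */
  	/* ... unmap_ref_private() runs here without A or B held ... */
  	mutex_lock(fault_mutex);			/* re-take B safely */

  	return 0;
  }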
The error message:
INFO: task repro_20250402_:13229 blocked for more than 64 seconds.
Not tainted 6.15.0-rc3+ #24
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:repro_20250402_ state:D stack:25856 pid:13229 tgid:13228 ppid:3513 task_flags:0x400040 flags:0x00004006
Call Trace:
<TASK>
__schedule+0x1755/0x4f50
schedule+0x158/0x330
schedule_preempt_disabled+0x15/0x30
__mutex_lock+0x75f/0xeb0
hugetlb_wp+0xf88/0x3440
hugetlb_fault+0x14c8/0x2c30
trace_clock_x86_tsc+0x20/0x20
do_user_addr_fault+0x61d/0x1490
exc_page_fault+0x64/0x100
asm_exc_page_fault+0x26/0x30
RIP: 0010:__put_user_4+0xd/0x20
copy_process+0x1f4a/0x3d60
kernel_clone+0x210/0x8f0
__x64_sys_clone+0x18d/0x1f0
do_syscall_64+0x6a/0x120
entry_SYSCALL_64_after_hwframe+0x76/0x7e
RIP: 0033:0x41b26d
</TASK>
INFO: task repro_20250402_:13229 is blocked on a mutex likely owned by task repro_20250402_:13250.
task:repro_20250402_ state:D stack:28288 pid:13250 tgid:13228 ppid:3513 task_flags:0x400040 flags:0x00000006
Call Trace:
<TASK>
__schedule+0x1755/0x4f50
schedule+0x158/0x330
io_schedule+0x92/0x110
folio_wait_bit_common+0x69a/0xba0
__filemap_get_folio+0x154/0xb70
hugetlb_fault+0xa50/0x2c30
trace_clock_x86_tsc+0x20/0x20
do_user_addr_fault+0xace/0x1490
exc_page_fault+0x64/0x100
asm_exc_page_fault+0x26/0x30
RIP: 0033:0x402619
</TASK>
INFO: task repro_20250402_:13250 blocked for more than 65 seconds.
Not tainted 6.15.0-rc3+ #24
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:repro_20250402_ state:D stack:28288 pid:13250 tgid:13228 ppid:3513 task_flags:0x400040 flags:0x00000006
Call Trace:
<TASK>
__schedule+0x1755/0x4f50
schedule+0x158/0x330
io_schedule+0x92/0x110
folio_wait_bit_common+0x69a/0xba0
__filemap_get_folio+0x154/0xb70
hugetlb_fault+0xa50/0x2c30
trace_clock_x86_tsc+0x20/0x20
do_user_addr_fault+0xace/0x1490
exc_page_fault+0x64/0x100
asm_exc_page_fault+0x26/0x30
RIP: 0033:0x402619
</TASK>
Showing all locks held in the system:
1 lock held by khungtaskd/35:
#0: ffffffff879a7440 (rcu_read_lock){....}-{1:3}, at: debug_show_all_locks+0x30/0x180
2 locks held by repro_20250402_/13229:
#0: ffff888017d801e0 (&mm->mmap_lock){++++}-{4:4}, at: lock_mm_and_find_vma+0x37/0x300
#1: ffff888000fec848 (&hugetlb_fault_mutex_table[i]){+.+.}-{4:4}, at: hugetlb_wp+0xf88/0x3440
3 locks held by repro_20250402_/13250:
#0: ffff8880177f3d08 (vm_lock){++++}-{0:0}, at: do_user_addr_fault+0x41b/0x1490
#1: ffff888000fec848 (&hugetlb_fault_mutex_table[i]){+.+.}-{4:4}, at: hugetlb_fault+0x3b8/0x2c30
#2: ffff8880129500e8 (&resv_map->rw_sema){++++}-{4:4}, at: hugetlb_fault+0x494/0x2c30
Link: https://drive.google.com/file/d/1DVRnIW-vSayU5J1re9Ct_br3jJQU6Vpb/view?usp=… [1]
Link: https://github.com/bboymimi/bpftracer/blob/master/scripts/hugetlb_lock_debu… [2]
Link: https://drive.google.com/file/d/1bWq2-8o-BJAuhoHWX7zAhI6ggfhVzQUI/view?usp=… [3]
Fixes: 40549ba8f8e0 ("hugetlb: use new vma_lock for pmd sharing synchronization")
Cc: <stable(a)vger.kernel.org>
Cc: Hugh Dickins <hughd(a)google.com>
Cc: Florent Revest <revest(a)google.com>
Cc: Gavin Shan <gshan(a)redhat.com>
Signed-off-by: Gavin Guo <gavinguo(a)igalia.com>
---
mm/hugetlb.c | 33 ++++++++++++++++++++++++++++-----
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e3e6ac991b9c..ad54a74aa563 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6115,7 +6115,8 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* Keep the pte_same checks anyway to make transition from the mutex easier.
*/
static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
- struct vm_fault *vmf)
+ struct vm_fault *vmf,
+ bool *pagecache_folio_unlocked)
{
struct vm_area_struct *vma = vmf->vma;
struct mm_struct *mm = vma->vm_mm;
@@ -6212,6 +6213,22 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
u32 hash;
folio_put(old_folio);
+ /*
+ * The pagecache_folio needs to be unlocked to avoid
+ * deadlock and we won't re-lock it in hugetlb_wp(). The
+ * pagecache_folio could be truncated after being
+			 * unlocked. So its state should not be relied upon
+			 * subsequently.
+ *
+ * Setting *pagecache_folio_unlocked to true allows the
+ * caller to handle any necessary logic related to the
+ * folio's unlocked state.
+ */
+ if (pagecache_folio) {
+ folio_unlock(pagecache_folio);
+ if (pagecache_folio_unlocked)
+ *pagecache_folio_unlocked = true;
+ }
/*
* Drop hugetlb_fault_mutex and vma_lock before
* unmapping. unmapping needs to hold vma_lock
@@ -6566,7 +6583,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
hugetlb_count_add(pages_per_huge_page(h), mm);
if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
- ret = hugetlb_wp(folio, vmf);
+ ret = hugetlb_wp(folio, vmf, NULL);
}
spin_unlock(vmf->ptl);
@@ -6638,6 +6655,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
struct hstate *h = hstate_vma(vma);
struct address_space *mapping;
int need_wait_lock = 0;
+ bool pagecache_folio_unlocked = false;
struct vm_fault vmf = {
.vma = vma,
.address = address & huge_page_mask(h),
@@ -6792,7 +6810,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
if (!huge_pte_write(vmf.orig_pte)) {
- ret = hugetlb_wp(pagecache_folio, &vmf);
+ ret = hugetlb_wp(pagecache_folio, &vmf,
+ &pagecache_folio_unlocked);
goto out_put_page;
} else if (likely(flags & FAULT_FLAG_WRITE)) {
vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
@@ -6809,10 +6828,14 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
out_ptl:
spin_unlock(vmf.ptl);
- if (pagecache_folio) {
+ /*
+ * If the pagecache_folio is unlocked in hugetlb_wp(), we skip
+ * folio_unlock() here.
+ */
+ if (pagecache_folio && !pagecache_folio_unlocked)
folio_unlock(pagecache_folio);
+ if (pagecache_folio)
folio_put(pagecache_folio);
- }
out_mutex:
hugetlb_vma_unlock_read(vma);
base-commit: d76bb1ebb5587f66b0f8b8099bfbb44722bc08b3
--
2.43.0
On Mon, May 26, 2025 at 02:40:33PM +0000, Parav Pandit wrote:
> When the PCI device is surprise-removed, requests may not complete on
> the device as the VQ is marked as broken. As a result, disk deletion
> hangs.
>
> Fix it by aborting the requests when the VQ is broken.
>
> With this fix now fio completes swiftly.
> An alternative of relying on the I/O timeout has been considered;
> however, when the driver knows the block device is unresponsive,
> swiftly clearing the requests enables users and upper layers to react
> quickly.
>
> Verified with multiple device unplug iterations with pending requests in
> virtio used ring and some pending with the device.
>
> Fixes: 43bb40c5b926 ("virtio_pci: Support surprise removal of virtio pci device")
> Cc: stable(a)vger.kernel.org
> Reported-by: Li RongQing <lirongqing(a)baidu.com>
> Closes: https://lore.kernel.org/virtualization/c45dd68698cd47238c55fb73ca9b4741@bai…
> Reviewed-by: Max Gurtovoy <mgurtovoy(a)nvidia.com>
> Reviewed-by: Israel Rukshin <israelr(a)nvidia.com>
> Signed-off-by: Parav Pandit <parav(a)nvidia.com>
Thanks!
I like the patch. Yes something to improve:
> ---
> v1->v2:
> - Addressed comments from Stephan
> - fixed spelling to 'waiting'
> - Addressed comments from Michael
> - Dropped checking broken vq from queue_rq() and queue_rqs()
> because it is checked in lower layer routines in virtio core
>
> v0->v1:
> - Fixed comments from Stefan to rename a cleanup function
> - Improved logic for handling any outstanding requests
> in bio layer
> - improved cancel callback to sync with ongoing done()
> ---
> drivers/block/virtio_blk.c | 83 ++++++++++++++++++++++++++++++++++++++
> 1 file changed, 83 insertions(+)
>
> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
> index 7cffea01d868..12f31e6c00cb 100644
> --- a/drivers/block/virtio_blk.c
> +++ b/drivers/block/virtio_blk.c
> @@ -1554,6 +1554,87 @@ static int virtblk_probe(struct virtio_device *vdev)
> return err;
> }
>
> +static bool virtblk_request_cancel(struct request *rq, void *data)
> +{
> + struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
> + struct virtio_blk *vblk = data;
> + struct virtio_blk_vq *vq;
> + unsigned long flags;
> +
> + vq = &vblk->vqs[rq->mq_hctx->queue_num];
> +
> + spin_lock_irqsave(&vq->lock, flags);
> +
> + vbr->in_hdr.status = VIRTIO_BLK_S_IOERR;
My understanding is that this is only safe to call when the device
is not accessing the vq anymore, right? Otherwise the device can
overwrite the status?
But I am not sure I understand what guarantees this.
Is there an assumption here that if the vq is broken
and we are on the remove path, then the device is already gone?
It seems to hold, but
I'd prefer something that makes this guarantee at the API
level.
> + if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq))
> + blk_mq_complete_request(rq);
> +
> + spin_unlock_irqrestore(&vq->lock, flags);
> + return true;
> +}
> +
> +static void virtblk_broken_device_cleanup(struct virtio_blk *vblk)
> +{
> + struct request_queue *q = vblk->disk->queue;
> +
> + if (!virtqueue_is_broken(vblk->vqs[0].vq))
> + return;
Can marking vqs broken be in progress such that vq 0
is already broken but another one is not yet?
If not, please add a comment explaining why.
> +
> +	/* Start freezing the queue, so that new requests keep waiting at the
> +	 * door of bio_queue_enter(). We cannot fully freeze the queue because
> +	 * a frozen queue is an empty queue and there are pending requests, so
> +	 * only start freezing it.
> + */
> + blk_freeze_queue_start(q);
> +
> + /* When quiescing completes, all ongoing dispatches have completed
> + * and no new dispatch will happen towards the driver.
> +	 * This ensures that later, when cancel is attempted, they are not
> +	 * getting processed by the queue_rq() or queue_rqs() handlers.
> + */
> + blk_mq_quiesce_queue(q);
> +
> + /*
> + * Synchronize with any ongoing VQ callbacks, effectively quiescing
> + * the device and preventing it from completing further requests
> + * to the block layer. Any outstanding, incomplete requests will be
> + * completed by virtblk_request_cancel().
I think what you really mean is:
finish processing in callbacks that might have started before the vqs
were marked as broken.
> + */
> + virtio_synchronize_cbs(vblk->vdev);
> +
> + /* At this point, no new requests can enter the queue_rq() and
> + * completion routine will not complete any new requests either for the
> + * broken vq. Hence, it is safe to cancel all requests which are
> + * started.
> + */
> + blk_mq_tagset_busy_iter(&vblk->tag_set, virtblk_request_cancel, vblk);
> + blk_mq_tagset_wait_completed_request(&vblk->tag_set);
> +
> + /* All pending requests are cleaned up. Time to resume so that disk
> + * deletion can be smooth. Start the HW queues so that when queue is
> + * unquiesced requests can again enter the driver.
> + */
> + blk_mq_start_stopped_hw_queues(q, true);
> +
> +	/* Unquiescing will trigger dispatching of any pending requests that
> +	 * have already crossed bio_queue_enter() to the driver.
> + */
> + blk_mq_unquiesce_queue(q);
> +
> + /* Wait for all pending dispatches to terminate which may have been
> + * initiated after unquiescing.
> + */
> + blk_mq_freeze_queue_wait(q);
> +
> +	/* Mark the disk dead so that once the queue is unfrozen, the requests
> +	 * waiting at the door of bio_queue_enter() can be aborted right away.
> + */
> + blk_mark_disk_dead(vblk->disk);
> +
> + /* Unfreeze the queue so that any waiting requests will be aborted. */
> + blk_mq_unfreeze_queue_nomemrestore(q);
> +}
> +
> static void virtblk_remove(struct virtio_device *vdev)
> {
> struct virtio_blk *vblk = vdev->priv;
> @@ -1561,6 +1642,8 @@ static void virtblk_remove(struct virtio_device *vdev)
> /* Make sure no work handler is accessing the device. */
> flush_work(&vblk->config_work);
>
> + virtblk_broken_device_cleanup(vblk);
> +
> del_gendisk(vblk->disk);
> blk_mq_free_tag_set(&vblk->tag_set);
>
> --
> 2.34.1
The function mod_hdcp_hdcp1_enable_encryption() calls the function
get_first_active_display(), but does not check its return value.
The return value is a null pointer if the display list is empty.
This will lead to a null pointer dereference in
mod_hdcp_hdcp1_enable_encryption().
Add a null pointer check for get_first_active_display() and return
MOD_HDCP_STATUS_DISPLAY_NOT_FOUND if the function returns NULL.
Fixes: 2deade5ede56 ("drm/amd/display: Remove hdcp display state with mst fix")
Cc: stable(a)vger.kernel.org # v5.8
Signed-off-by: Wentao Liang <vulab(a)iscas.ac.cn>
---
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 8c137d7c032e..e58e7b93810b 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -368,6 +368,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
struct mod_hdcp_display *display = get_first_active_display(hdcp);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
--
2.42.0.windows.2