This is a note to let you know that I've just added the patch titled
RDMA/vmw_pvrdma: Fix usage of user response structures in ABI file
to the 4.15-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=su...

The filename of the patch is:
     rdma-vmw_pvrdma-fix-usage-of-user-response-structures-in-abi-file.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree, please let stable@vger.kernel.org know about it.
From 1f5a6c47aabc4606f91ad2e6ef71a1ff1924101c Mon Sep 17 00:00:00 2001
From: Adit Ranadive <aditr@vmware.com>
Date: Thu, 15 Feb 2018 12:36:46 -0800
Subject: RDMA/vmw_pvrdma: Fix usage of user response structures in ABI file

From: Adit Ranadive <aditr@vmware.com>
commit 1f5a6c47aabc4606f91ad2e6ef71a1ff1924101c upstream.
This ensures that we return the right structures back to userspace. Otherwise, it looks like the reserved fields in the response structures in userspace might have uninitialized data in them.
Fixes: 8b10ba783c9d ("RDMA/vmw_pvrdma: Add shared receive queue support")
Fixes: 29c8d9eba550 ("IB: Add vmw_pvrdma driver")
Suggested-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Bryan Tan <bryantan@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
Signed-off-by: Adit Ranadive <aditr@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c    |    4 +++-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c   |    4 +++-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c |    4 +++-
 3 files changed, 9 insertions(+), 3 deletions(-)
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib
 	union pvrdma_cmd_resp rsp;
 	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
 	struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+	struct pvrdma_create_cq_resp cq_resp = {0};
 	struct pvrdma_create_cq ucmd;
 
 	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@@ -198,6 +199,7 @@ struct ib_cq *pvrdma_create_cq(struct ib
 
 	cq->ibcq.cqe = resp->cqe;
 	cq->cq_handle = resp->cq_handle;
+	cq_resp.cqn = resp->cq_handle;
 	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
 	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
@@ -206,7 +208,7 @@ struct ib_cq *pvrdma_create_cq(struct ib
 		cq->uar = &(to_vucontext(context)->uar);
 
 		/* Copy udata back. */
-		if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
+		if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
 			dev_warn(&dev->pdev->dev,
 				 "failed to copy back udata\n");
 			pvrdma_destroy_cq(&cq->ibcq);
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct
 	union pvrdma_cmd_resp rsp;
 	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
 	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
+	struct pvrdma_create_srq_resp srq_resp = {0};
 	struct pvrdma_create_srq ucmd;
 	unsigned long flags;
 	int ret;
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct
 	}
 
 	srq->srq_handle = resp->srqn;
+	srq_resp.srqn = resp->srqn;
 	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
 	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
 	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
 	/* Copy udata back. */
-	if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
+	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
 		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
 		pvrdma_destroy_srq(&srq->ibsrq);
 		return ERR_PTR(-EINVAL);
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_
 	union pvrdma_cmd_resp rsp;
 	struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
 	struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+	struct pvrdma_alloc_pd_resp pd_resp = {0};
 	int ret;
 	void *ptr;
 
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_
 	pd->privileged = !context;
 	pd->pd_handle = resp->pd_handle;
 	pd->pdn = resp->pd_handle;
+	pd_resp.pdn = resp->pd_handle;
 
 	if (context) {
-		if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+		if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
 			dev_warn(&dev->pdev->dev,
 				 "failed to copy back protection domain\n");
 			pvrdma_dealloc_pd(&pd->ibpd);
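For readers unfamiliar with the udata copy-back pattern, here is a minimal userspace simulation of the issue described in the commit message above. It is not kernel code and is not part of the patch: struct fake_create_cq_resp and copy_to_user_buf() are simplified stand-ins for the pvrdma ABI response structs and ib_copy_to_udata(). Copying back only the 32-bit handle leaves the rest of the caller's response buffer, including any reserved field, holding whatever bytes were already there; building a zero-initialized response struct and copying it in full, as the patch does, makes every byte of the response well defined.

/*
 * Minimal userspace simulation (not kernel code) of the problem the patch
 * fixes.  The struct layout and helper below are simplified stand-ins for
 * the real pvrdma ABI structs and ib_copy_to_udata().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_create_cq_resp {
	uint32_t cqn;
	uint32_t reserved;	/* should reach userspace as zero */
};

/* Stand-in for ib_copy_to_udata(): a bounded copy into the user buffer. */
static int copy_to_user_buf(void *ubuf, size_t ubuf_len,
			    const void *src, size_t src_len)
{
	if (src_len > ubuf_len)
		return -1;
	memcpy(ubuf, src, src_len);
	return 0;
}

int main(void)
{
	struct fake_create_cq_resp user_resp;
	uint32_t cq_handle = 42;

	/* Simulate a userspace response buffer with stale contents. */
	memset(&user_resp, 0xaa, sizeof(user_resp));

	/* Old behaviour: copy only the handle; 'reserved' keeps 0xaaaaaaaa. */
	copy_to_user_buf(&user_resp, sizeof(user_resp),
			 &cq_handle, sizeof(uint32_t));
	printf("old: cqn=%u reserved=0x%x\n",
	       (unsigned)user_resp.cqn, (unsigned)user_resp.reserved);

	/* New behaviour: build a zeroed response struct and copy all of it. */
	struct fake_create_cq_resp cq_resp = {0};
	cq_resp.cqn = cq_handle;
	copy_to_user_buf(&user_resp, sizeof(user_resp),
			 &cq_resp, sizeof(cq_resp));
	printf("new: cqn=%u reserved=0x%x\n",
	       (unsigned)user_resp.cqn, (unsigned)user_resp.reserved);

	return 0;
}

Compiling and running this sketch should print a non-zero 'reserved' value for the old behaviour and zero for the new one, which is the userspace-visible difference the fix is after.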
Patches currently in stable-queue which might be from aditr@vmware.com are
queue-4.15/rdma-vmw_pvrdma-fix-usage-of-user-response-structures-in-abi-file.patch