5.10-stable review patch. If anyone has any objections, please let me know.
------------------
From: Chengchang Tang <tangchengchang@huawei.com>
commit 9e03dbea2b0634b21a45946b4f8097e0dc86ebe1 upstream.
Currently, the affinity between the QP cache and the CQ cache is not considered when assigning a QPN, which affects the message rate of the HW.
Allocate QPN from QP cache with better CQ affinity to get better performance.
Fixes: 71586dd20010 ("RDMA/hns: Create QP with selected QPN for bank load balance")
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20230804012711.808069-5-huangjunxian6@hisilicon.co...
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/infiniband/hw/hns/hns_roce_device.h |    2 ++
 drivers/infiniband/hw/hns/hns_roce_qp.c     |   28 ++++++++++++++++++++++------
 2 files changed, 24 insertions(+), 6 deletions(-)
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -122,6 +122,8 @@
  */
 #define EQ_DEPTH_COEFF			2
 
+#define CQ_BANKID_MASK			GENMASK(1, 0)
+
 enum {
 	SERV_TYPE_RC,
 	SERV_TYPE_UC,
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -154,14 +154,29 @@ static void hns_roce_ib_qp_event(struct
 	}
 }
 
-static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
+static u8 get_affinity_cq_bank(u8 qp_bank)
 {
-	u32 least_load = bank[0].inuse;
+	return (qp_bank >> 1) & CQ_BANKID_MASK;
+}
+
+static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
+				       struct hns_roce_bank *bank)
+{
+#define INVALID_LOAD_QPNUM 0xFFFFFFFF
+	struct ib_cq *scq = init_attr->send_cq;
+	u32 least_load = INVALID_LOAD_QPNUM;
+	unsigned long cqn = 0;
 	u8 bankid = 0;
 	u32 bankcnt;
 	u8 i;
 
-	for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
+	if (scq)
+		cqn = to_hr_cq(scq)->cqn;
+
+	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
+		if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
+			continue;
+
 		bankcnt = bank[i].inuse;
 		if (bankcnt < least_load) {
 			least_load = bankcnt;
@@ -193,7 +208,8 @@ static int alloc_qpn_with_bankid(struct
 	return 0;
 }
 
-static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+		     struct ib_qp_init_attr *init_attr)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
 	unsigned long num = 0;
@@ -211,7 +227,7 @@ static int alloc_qpn(struct hns_roce_dev
 		hr_qp->doorbell_qpn = 1;
 	} else {
 		mutex_lock(&qp_table->bank_mutex);
-		bankid = get_least_load_bankid_for_qp(qp_table->bank);
+		bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);
 
 		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
 					    &num);
@@ -1005,7 +1021,7 @@ static int hns_roce_create_qp_common(str
 		goto err_db;
 	}
 
-	ret = alloc_qpn(hr_dev, hr_qp);
+	ret = alloc_qpn(hr_dev, hr_qp, init_attr);
 	if (ret) {
 		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
 		goto err_buf;
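
Not part of the patch: for reviewers who want to see the new selection logic in isolation, here is a minimal userspace sketch of the CQ-affinity filter it introduces. It assumes 8 QP banks (HNS_ROCE_QP_BANK_NUM in the driver) and the GENMASK(1, 0) CQ bank mask from the hunk above; pick_qp_bank() and the sample load numbers are made up for illustration, and the "no send CQ" case (scq == NULL, where every bank is a candidate) is left out.

/* Standalone sketch of the CQ-affinity bank selection; not driver code. */
#include <stdio.h>

#define QP_BANK_NUM	8		/* assumed HNS_ROCE_QP_BANK_NUM */
#define CQ_BANKID_MASK	0x3		/* GENMASK(1, 0) */

/* QP bank i is affine to CQ bank (i >> 1) & 3, as in get_affinity_cq_bank(). */
static unsigned char get_affinity_cq_bank(unsigned char qp_bank)
{
	return (qp_bank >> 1) & CQ_BANKID_MASK;
}

/* Pick the least-loaded QP bank among those affine to the send CQ's bank. */
static unsigned char pick_qp_bank(unsigned long cqn, const unsigned int *inuse)
{
	unsigned int least_load = 0xFFFFFFFF;	/* INVALID_LOAD_QPNUM */
	unsigned char bankid = 0;
	unsigned char i;

	for (i = 0; i < QP_BANK_NUM; i++) {
		if (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK))
			continue;
		if (inuse[i] < least_load) {
			least_load = inuse[i];
			bankid = i;
		}
	}
	return bankid;
}

int main(void)
{
	unsigned int inuse[QP_BANK_NUM] = { 5, 1, 4, 0, 7, 2, 3, 6 };
	unsigned long cqn = 6;		/* send CQ sits in CQ bank 6 & 3 = 2 */

	/* Only QP banks 4 and 5 map to CQ bank 2; bank 5 carries less load. */
	printf("chosen QP bank: %u\n", (unsigned int)pick_qp_bank(cqn, inuse));
	return 0;
}

The point of the change is simply that only the QP banks whose affinity maps to the send CQ's bank remain candidates, and the least-loaded of those is picked, so QPN load balancing still happens but stays within the CQ-affine banks.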