From: Greg Kroah-Hartman
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman,
	stable@vger.kernel.org,
	Hyunchul Lee,
	Namjae Jeon,
	Steve French,
	Sasha Levin
Subject: [PATCH 5.18 1050/1095] ksmbd: smbd: introduce read/write credits for RDMA read/write
Date: Mon, 15 Aug 2022 20:07:29 +0200
Message-Id: <20220815180512.513026047@linuxfoundation.org>
In-Reply-To: <20220815180429.240518113@linuxfoundation.org>
References: <20220815180429.240518113@linuxfoundation.org>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

From: Hyunchul Lee

[ Upstream commit ddbdc861e37c168cf2fb8a7b7477f5d18b4daf76 ]

An SMB2_READ/SMB2_WRITE request has to be granted a number of rw credits
equal to the number of pages the request wants to transfer divided by the
maximum number of pages which can be registered with one MR, in order to
read and write a file. Also allocate enough RDMA resources for the maximum
number of rw credits allowed by ksmbd.
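For illustration, a minimal standalone sketch of the credit arithmetic
described above (not part of the diff below; the 4 KiB page size, the
256-page MR cap and the 8 MiB transfer size are assumed example values
standing in for PAGE_SIZE, smb_direct_get_max_fr_pages() and
smb_direct_max_read_write_size in the driver):

	#include <stdio.h>

	/* Same rounding helper the kernel uses. */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int page_size = 4096;			/* assumed PAGE_SIZE */
		unsigned int pages_per_rw_credit = 256;		/* assumed MR page-list cap */
		unsigned int io_size = 8 * 1024 * 1024;		/* assumed max read/write size */

		/* Worst case: an unaligned buffer spans one extra page. */
		unsigned int pages = DIV_ROUND_UP(io_size, page_size) + 1;

		/* One rw credit covers one MR registration of pages_per_rw_credit pages. */
		unsigned int credits = DIV_ROUND_UP(pages, pages_per_rw_credit);

		printf("pages = %u, rw credits needed = %u\n", pages, credits);
		return 0;
	}

With these example numbers an 8 MiB transfer spans 2048 + 1 = 2049 pages
and therefore needs DIV_ROUND_UP(2049, 256) = 9 rw credits.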
Signed-off-by: Hyunchul Lee
Acked-by: Namjae Jeon
Signed-off-by: Steve French
Signed-off-by: Sasha Levin
---
 fs/ksmbd/transport_rdma.c | 120 ++++++++++++++++++++++----------------
 1 file changed, 71 insertions(+), 49 deletions(-)

diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index 479d279ee146..b44a5e584bac 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -80,9 +80,7 @@ static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
 /* The maximum single-message size which can be received */
 static int smb_direct_max_receive_size = 8192;
 
-static int smb_direct_max_read_write_size = 524224;
-
-static int smb_direct_max_outstanding_rw_ops = 8;
+static int smb_direct_max_read_write_size = 8 * 1024 * 1024;
 
 static LIST_HEAD(smb_direct_device_list);
 static DEFINE_RWLOCK(smb_direct_device_lock);
@@ -147,10 +145,12 @@ struct smb_direct_transport {
 	atomic_t		send_credits;
 	spinlock_t		lock_new_recv_credits;
 	int			new_recv_credits;
-	atomic_t		rw_avail_ops;
+	int			max_rw_credits;
+	int			pages_per_rw_credit;
+	atomic_t		rw_credits;
 
 	wait_queue_head_t	wait_send_credits;
-	wait_queue_head_t	wait_rw_avail_ops;
+	wait_queue_head_t	wait_rw_credits;
 
 	mempool_t		*sendmsg_mempool;
 	struct kmem_cache	*sendmsg_cache;
@@ -377,7 +377,7 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
 	t->reassembly_queue_length = 0;
 	init_waitqueue_head(&t->wait_reassembly_queue);
 	init_waitqueue_head(&t->wait_send_credits);
-	init_waitqueue_head(&t->wait_rw_avail_ops);
+	init_waitqueue_head(&t->wait_rw_credits);
 
 	spin_lock_init(&t->receive_credit_lock);
 	spin_lock_init(&t->recvmsg_queue_lock);
@@ -984,18 +984,19 @@ static int smb_direct_flush_send_list(struct smb_direct_transport *t,
 }
 
 static int wait_for_credits(struct smb_direct_transport *t,
-			    wait_queue_head_t *waitq, atomic_t *credits)
+			    wait_queue_head_t *waitq, atomic_t *total_credits,
+			    int needed)
 {
 	int ret;
 
 	do {
-		if (atomic_dec_return(credits) >= 0)
+		if (atomic_sub_return(needed, total_credits) >= 0)
 			return 0;
 
-		atomic_inc(credits);
+		atomic_add(needed, total_credits);
 		ret = wait_event_interruptible(*waitq,
-					       atomic_read(credits) > 0 ||
-					       t->status != SMB_DIRECT_CS_CONNECTED);
+					       atomic_read(total_credits) >= needed ||
+					       t->status != SMB_DIRECT_CS_CONNECTED);
 
 		if (t->status != SMB_DIRECT_CS_CONNECTED)
 			return -ENOTCONN;
@@ -1016,7 +1017,19 @@ static int wait_for_send_credits(struct smb_direct_transport *t,
 			return ret;
 	}
 
-	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
+	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1);
+}
+
+static int wait_for_rw_credits(struct smb_direct_transport *t, int credits)
+{
+	return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits);
+}
+
+static int calc_rw_credits(struct smb_direct_transport *t,
+			   char *buf, unsigned int len)
+{
+	return DIV_ROUND_UP(get_buf_page_count(buf, len),
+			    t->pages_per_rw_credit);
 }
 
 static int smb_direct_create_header(struct smb_direct_transport *t,
@@ -1332,8 +1345,8 @@ static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
 		smb_direct_disconnect_rdma_connection(t);
 	}
 
-	if (atomic_inc_return(&t->rw_avail_ops) > 0)
-		wake_up(&t->wait_rw_avail_ops);
+	if (atomic_inc_return(&t->rw_credits) > 0)
+		wake_up(&t->wait_rw_credits);
 
 	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
 			    msg->sg_list, msg->sgt.nents, dir);
@@ -1364,8 +1377,10 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
 	struct ib_send_wr *first_wr = NULL;
 	u32 remote_key = le32_to_cpu(desc[0].token);
 	u64 remote_offset = le64_to_cpu(desc[0].offset);
+	int credits_needed;
 
-	ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops);
+	credits_needed = calc_rw_credits(t, buf, buf_len);
+	ret = wait_for_rw_credits(t, credits_needed);
 	if (ret < 0)
 		return ret;
 
@@ -1373,7 +1388,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
 	msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
 		      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
 	if (!msg) {
-		atomic_inc(&t->rw_avail_ops);
+		atomic_add(credits_needed, &t->rw_credits);
 		return -ENOMEM;
 	}
 
@@ -1382,7 +1397,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
 			      get_buf_page_count(buf, buf_len),
 			      msg->sg_list, SG_CHUNK_SIZE);
 	if (ret) {
-		atomic_inc(&t->rw_avail_ops);
+		atomic_add(credits_needed, &t->rw_credits);
 		kfree(msg);
 		return -ENOMEM;
 	}
@@ -1418,7 +1433,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
 	return 0;
 
 err:
-	atomic_inc(&t->rw_avail_ops);
+	atomic_add(credits_needed, &t->rw_credits);
 	if (first_wr)
 		rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
 				    msg->sg_list, msg->sgt.nents,
@@ -1643,11 +1658,19 @@ static int smb_direct_prepare_negotiation(struct smb_direct_transport *t)
 	return ret;
 }
 
+static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t)
+{
+	return min_t(unsigned int,
+		     t->cm_id->device->attrs.max_fast_reg_page_list_len,
+		     256);
+}
+
 static int smb_direct_init_params(struct smb_direct_transport *t,
 				  struct ib_qp_cap *cap)
 {
 	struct ib_device *device = t->cm_id->device;
-	int max_send_sges, max_pages, max_rw_wrs, max_send_wrs;
+	int max_send_sges, max_rw_wrs, max_send_wrs;
+	unsigned int max_sge_per_wr, wrs_per_credit;
 
 	/* need 2 more sge. because a SMB_DIRECT header will be mapped,
 	 * and maybe a send buffer could be not page aligned.
@@ -1659,25 +1682,31 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
 		return -EINVAL;
 	}
 
-	/*
-	 * allow smb_direct_max_outstanding_rw_ops of in-flight RDMA
-	 * read/writes. HCA guarantees at least max_send_sge of sges for
-	 * a RDMA read/write work request, and if memory registration is used,
-	 * we need reg_mr, local_inv wrs for each read/write.
+	/* Calculate the number of work requests for RDMA R/W.
+	 * The maximum number of pages which can be registered
+	 * with one Memory region can be transferred with one
+	 * R/W credit. And at least 4 work requests for each credit
+	 * are needed for MR registration, RDMA R/W, local & remote
+	 * MR invalidation.
 	 */
 	t->max_rdma_rw_size = smb_direct_max_read_write_size;
-	max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
-	max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);
-	max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num,
-			max_pages) * 2;
-	max_rw_wrs *= smb_direct_max_outstanding_rw_ops;
+	t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t);
+	t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size,
+					 (t->pages_per_rw_credit - 1) *
+					 PAGE_SIZE);
+
+	max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge,
+			       device->attrs.max_sge_rd);
+	wrs_per_credit = max_t(unsigned int, 4,
+			       DIV_ROUND_UP(t->pages_per_rw_credit,
+					    max_sge_per_wr) + 1);
+	max_rw_wrs = t->max_rw_credits * wrs_per_credit;
 
 	max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
 	if (max_send_wrs > device->attrs.max_cqe ||
 	    max_send_wrs > device->attrs.max_qp_wr) {
-		pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n",
-		       smb_direct_send_credit_target,
-		       smb_direct_max_outstanding_rw_ops);
+		pr_err("consider lowering send_credit_target = %d\n",
+		       smb_direct_send_credit_target);
 		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
 		       device->attrs.max_cqe, device->attrs.max_qp_wr);
 		return -EINVAL;
@@ -1712,7 +1741,7 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
 
 	t->send_credit_target = smb_direct_send_credit_target;
 	atomic_set(&t->send_credits, 0);
-	atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops);
+	atomic_set(&t->rw_credits, t->max_rw_credits);
 
 	t->max_send_size = smb_direct_max_send_size;
 	t->max_recv_size = smb_direct_max_receive_size;
@@ -1720,12 +1749,10 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
 
 	cap->max_send_wr = max_send_wrs;
 	cap->max_recv_wr = t->recv_credit_max;
-	cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
+	cap->max_send_sge = max_sge_per_wr;
 	cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
 	cap->max_inline_data = 0;
-	cap->max_rdma_ctxs =
-		rdma_rw_mr_factor(device, t->cm_id->port_num, max_pages) *
-		smb_direct_max_outstanding_rw_ops;
+	cap->max_rdma_ctxs = t->max_rw_credits;
 	return 0;
 }
 
@@ -1818,7 +1845,8 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
 	}
 
 	t->send_cq = ib_alloc_cq(t->cm_id->device, t,
-				 t->send_credit_target, 0, IB_POLL_WORKQUEUE);
+				 smb_direct_send_credit_target + cap->max_rdma_ctxs,
+				 0, IB_POLL_WORKQUEUE);
 	if (IS_ERR(t->send_cq)) {
 		pr_err("Can't create RDMA send CQ\n");
 		ret = PTR_ERR(t->send_cq);
@@ -1827,8 +1855,7 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
 	}
 
 	t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
-				 cap->max_send_wr + cap->max_rdma_ctxs,
-				 0, IB_POLL_WORKQUEUE);
+				 t->recv_credit_max, 0, IB_POLL_WORKQUEUE);
 	if (IS_ERR(t->recv_cq)) {
 		pr_err("Can't create RDMA recv CQ\n");
 		ret = PTR_ERR(t->recv_cq);
@@ -1857,17 +1884,12 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
 
 	pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
 	if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
-		int pages_per_mr, mr_count;
-
-		pages_per_mr = min_t(int, pages_per_rw,
-				     t->cm_id->device->attrs.max_fast_reg_page_list_len);
-		mr_count = DIV_ROUND_UP(pages_per_rw, pages_per_mr) *
-			atomic_read(&t->rw_avail_ops);
-		ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, mr_count,
-				      IB_MR_TYPE_MEM_REG, pages_per_mr, 0);
+		ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs,
+				      t->max_rw_credits, IB_MR_TYPE_MEM_REG,
+				      t->pages_per_rw_credit, 0);
 		if (ret) {
 			pr_err("failed to init mr pool count %d pages %d\n",
-			       mr_count, pages_per_mr);
+			       t->max_rw_credits, t->pages_per_rw_credit);
 			goto err;
 		}
 	}
-- 
2.35.1