From: Greg Kroah-Hartman
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman, stable@vger.kernel.org, Dan Carpenter, Shiraz Saleem, Leon Romanovsky, Sasha Levin
Subject: [PATCH 6.0 535/862] RDMA/irdma: Validate udata inlen and outlen
Date: Wed, 19 Oct 2022 10:30:22 +0200
Message-Id: <20221019083313.599857915@linuxfoundation.org>
In-Reply-To: <20221019083249.951566199@linuxfoundation.org>
References: <20221019083249.951566199@linuxfoundation.org>

From: Shiraz Saleem

[ Upstream commit 34acb833cc83bdea912a160ff99b537e62bb4cf3 ]

Currently, ib_copy_from_udata() and ib_copy_to_udata() can underfill the
request and response buffers if user-space passes an undersized value for
udata->inlen or udata->outlen respectively [1]. This can lead to
undesirable behavior: zero-initializing the buffer only goes as far as
preventing use of uninitialized memory, and the driver may still act on
fields the user never actually supplied.

Validate udata->inlen and udata->outlen passed from user-space to ensure
they are at least the required minimum size.
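For illustration (not part of the patch): every hunk below follows the same
pattern. The minimum ABI size of a user request/response struct is the offset
just past its last member, computed with the kernel's offsetofend() helper,
and udata buffers shorter than that are rejected before any copy takes place.
A minimal sketch, reusing the names from the irdma_alloc_ucontext() hunk in
the diff:

	#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN \
		offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
	#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN \
		offsetofend(struct irdma_alloc_ucontext_resp, rsvd)

	/* Fail before ib_copy_from_udata()/ib_copy_to_udata() can underfill. */
	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
		return -EINVAL;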
[1] https://lore.kernel.org/linux-rdma/MWHPR11MB0029F37D40D9D4A993F8F549E9D79@MWHPR11MB0029.namprd11.prod.outlook.com/

Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
Reported-by: Dan Carpenter
Signed-off-by: Shiraz Saleem
Link: https://lore.kernel.org/r/20220907191324.1173-3-shiraz.saleem@intel.com
Signed-off-by: Leon Romanovsky
Signed-off-by: Sasha Levin
---
 drivers/infiniband/hw/irdma/verbs.c | 67 ++++++++++++++++++++++++++---
 1 file changed, 60 insertions(+), 7 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 6f07a913ef88..a22afbb25bc5 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -299,13 +299,19 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp)
 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
 				struct ib_udata *udata)
 {
+#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
+#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
 	struct ib_device *ibdev = uctx->device;
 	struct irdma_device *iwdev = to_iwdev(ibdev);
-	struct irdma_alloc_ucontext_req req;
+	struct irdma_alloc_ucontext_req req = {};
 	struct irdma_alloc_ucontext_resp uresp = {};
 	struct irdma_ucontext *ucontext = to_ucontext(uctx);
 	struct irdma_uk_attrs *uk_attrs;
 
+	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+		return -EINVAL;
+
 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
 		return -EINVAL;
 
@@ -317,7 +323,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
 
 	uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
 	/* GEN_1 legacy support with libi40iw */
-	if (udata->outlen < sizeof(uresp)) {
+	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
 		if (uk_attrs->hw_rev != IRDMA_GEN_1)
 			return -EOPNOTSUPP;
 
@@ -389,6 +395,7 @@ static void irdma_dealloc_ucontext(struct ib_ucontext *context)
  */
 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
+#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
 	struct irdma_pd *iwpd = to_iwpd(pd);
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -398,6 +405,9 @@ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 	u32 pd_id = 0;
 	int err;
 
+	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
+		return -EINVAL;
+
 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
 			       &rf->next_pd);
 	if (err)
@@ -814,12 +824,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 			   struct ib_qp_init_attr *init_attr,
 			   struct ib_udata *udata)
 {
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
 	struct ib_pd *ibpd = ibqp->pd;
 	struct irdma_pd *iwpd = to_iwpd(ibpd);
 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
 	struct irdma_pci_f *rf = iwdev->rf;
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
-	struct irdma_create_qp_req req;
+	struct irdma_create_qp_req req = {};
 	struct irdma_create_qp_resp uresp = {};
 	u32 qp_num = 0;
 	int err_code;
@@ -836,6 +848,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 	if (err_code)
 		return err_code;
 
+	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+		return -EINVAL;
+
 	sq_size = init_attr->cap.max_send_wr;
 	rq_size = init_attr->cap.max_recv_wr;
 
@@ -1120,6 +1136,8 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			 int attr_mask, struct ib_udata *udata)
 {
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
 	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_device *iwdev = iwqp->iwdev;
@@ -1138,6 +1156,13 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	roce_info = &iwqp->roce_info;
 	udp_info = &iwqp->udp_info;
 
+	if (udata) {
+		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+			return -EINVAL;
+	}
+
 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 		return -EOPNOTSUPP;
 
@@ -1374,7 +1399,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 		spin_unlock_irqrestore(&iwqp->lock, flags);
-		if (udata) {
+		if (udata && udata->inlen) {
 			if (ib_copy_from_udata(&ureq, udata,
 					       min(sizeof(ureq), udata->inlen)))
 				return -EINVAL;
@@ -1426,7 +1451,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	} else {
 		iwqp->ibqp_state = attr->qp_state;
 	}
-	if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+	if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 		struct irdma_ucontext *ucontext;
 
 		ucontext = rdma_udata_to_drv_context(udata,
@@ -1466,6 +1491,8 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		    struct ib_udata *udata)
 {
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_device *iwdev = iwqp->iwdev;
 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -1480,6 +1507,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	int err;
 	unsigned long flags;
 
+	if (udata) {
+		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+			return -EINVAL;
+	}
+
 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 		return -EOPNOTSUPP;
 
@@ -1565,7 +1599,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	case IB_QPS_RESET:
 		if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 			spin_unlock_irqrestore(&iwqp->lock, flags);
-			if (udata) {
+			if (udata && udata->inlen) {
 				if (ib_copy_from_udata(&ureq, udata,
 						       min(sizeof(ureq), udata->inlen)))
 					return -EINVAL;
@@ -1662,7 +1696,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 			}
 		}
 	}
-	if (attr_mask & IB_QP_STATE && udata &&
+	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 		struct irdma_ucontext *ucontext;
 
@@ -1797,6 +1831,7 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 			   struct ib_udata *udata)
 {
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
 	struct irdma_cq *iwcq = to_iwcq(ibcq);
 	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
 	struct irdma_cqp_request *cqp_request;
@@ -1819,6 +1854,9 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 			    IRDMA_FEATURE_CQ_RESIZE))
 		return -EOPNOTSUPP;
 
+	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+		return -EINVAL;
+
 	if (entries > rf->max_cqe)
 		return -EINVAL;
 
@@ -1951,6 +1989,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 			   const struct ib_cq_init_attr *attr,
 			   struct ib_udata *udata)
 {
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
 	struct ib_device *ibdev = ibcq->device;
 	struct irdma_device *iwdev = to_iwdev(ibdev);
 	struct irdma_pci_f *rf = iwdev->rf;
@@ -1969,6 +2009,11 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
 	if (err_code)
 		return err_code;
+
+	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+		return -EINVAL;
+
 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
 				    &rf->next_cq);
 	if (err_code)
@@ -2746,6 +2791,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 				       u64 virt, int access,
 				       struct ib_udata *udata)
 {
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_ucontext *ucontext;
 	struct irdma_pble_alloc *palloc;
@@ -2763,6 +2809,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 		return ERR_PTR(-EINVAL);
 
+	if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+		return ERR_PTR(-EINVAL);
+
 	region = ib_umem_get(pd->device, start, len, access);
 
 	if (IS_ERR(region)) {
@@ -4298,12 +4347,16 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
 				struct rdma_ah_init_attr *attr,
 				struct ib_udata *udata)
 {
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
 	struct irdma_create_ah_resp uresp;
 	struct irdma_ah *parent_ah;
 	int err;
 
+	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+		return -EINVAL;
+
 	err = irdma_setup_ah(ibah, attr);
 	if (err)
 		return err;
-- 
2.35.1