From: Leon Romanovsky
To: Marek Szyprowski, Jens Axboe, Christoph Hellwig, Keith Busch
Cc: Leon Romanovsky, Jake Edge, Jonathan Corbet, Jason Gunthorpe,
    Zhu Yanjun, Robin Murphy, Joerg Roedel, Will Deacon, Sagi Grimberg,
    Bjorn Helgaas, Logan Gunthorpe, Yishai Hadas, Shameer Kolothum,
    Kevin Tian, Alex Williamson, Jérôme Glisse, Andrew Morton,
    linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
    linux-block@vger.kernel.org, linux-rdma@vger.kernel.org,
    iommu@lists.linux.dev, linux-nvme@lists.infradead.org,
    linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org,
    Niklas Schnelle, Chuck Lever, Luis Chamberlain, Matthew Wilcox,
    Dan Williams, Kanchan Joshi, Chaitanya Kulkarni
Subject: [PATCH v9 14/24] RDMA/umem: Separate implicit ODP initialization from explicit ODP
Date: Wed, 23 Apr 2025 11:13:05 +0300

From: Leon Romanovsky

Create separate functions for the implicit ODP initialization, which is
different from the explicit ODP initialization.
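The shape of the change can be sketched with a small self-contained
userspace model (a minimal sketch with hypothetical names such as
odp_model and init_implicit; it is not kernel code and not part of this
patch): the implicit initializer becomes void and cannot fail, the
explicit initializer keeps the fallible resource setup, and release
tears resources down only for the explicit case.

  #include <errno.h>
  #include <stdlib.h>

  /*
   * Toy model of the split: one infallible initializer for the
   * implicit case, one fallible initializer for the explicit case,
   * and a release that frees resources only for explicit mappings.
   */
  struct odp_model {
          int is_implicit;
          void *map;              /* stands in for umem_odp->map */
  };

  static void init_implicit(struct odp_model *m)
  {
          m->is_implicit = 1;     /* nothing here can fail */
  }

  static int init_explicit(struct odp_model *m)
  {
          m->map = malloc(64);    /* models hmm_dma_map_alloc() */
          return m->map ? 0 : -ENOMEM;
  }

  static void release(struct odp_model *m)
  {
          if (!m->is_implicit)    /* models the !is_implicit_odp check */
                  free(m->map);
  }

  int main(void)
  {
          struct odp_model imp = { 0 }, exp = { 0 };

          init_implicit(&imp);            /* callers need no error path */
          if (init_explicit(&exp))        /* explicit setup may fail */
                  return 1;
          release(&exp);
          release(&imp);
          return 0;
  }

This is why ib_umem_odp_alloc_implicit() below no longer needs the
ret/unwind path that the old combined initializer forced on it.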
Tested-by: Jens Axboe
Signed-off-by: Leon Romanovsky
Reviewed-by: Jason Gunthorpe
---
 drivers/infiniband/core/umem_odp.c | 91 +++++++++++++++---------------
 1 file changed, 46 insertions(+), 45 deletions(-)

diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 30cd8f353476..51d518989914 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -48,41 +48,44 @@
 
 #include "uverbs.h"
 
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
-				   const struct mmu_interval_notifier_ops *ops)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
+{
+	umem_odp->is_implicit_odp = 1;
+	umem_odp->umem.is_odp = 1;
+	mutex_init(&umem_odp->umem_mutex);
+}
+
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+			    const struct mmu_interval_notifier_ops *ops)
 {
 	struct ib_device *dev = umem_odp->umem.ibdev;
+	size_t page_size = 1UL << umem_odp->page_shift;
+	unsigned long start;
+	unsigned long end;
 	int ret;
 
 	umem_odp->umem.is_odp = 1;
 	mutex_init(&umem_odp->umem_mutex);
 
-	if (!umem_odp->is_implicit_odp) {
-		size_t page_size = 1UL << umem_odp->page_shift;
-		unsigned long start;
-		unsigned long end;
-
-		start = ALIGN_DOWN(umem_odp->umem.address, page_size);
-		if (check_add_overflow(umem_odp->umem.address,
-				       (unsigned long)umem_odp->umem.length,
-				       &end))
-			return -EOVERFLOW;
-		end = ALIGN(end, page_size);
-		if (unlikely(end < page_size))
-			return -EOVERFLOW;
-
-		ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
-					(end - start) >> PAGE_SHIFT,
-					1 << umem_odp->page_shift);
-		if (ret)
-			return ret;
-
-		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
-						   umem_odp->umem.owning_mm,
-						   start, end - start, ops);
-		if (ret)
-			goto out_free_map;
-	}
+	start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+	if (check_add_overflow(umem_odp->umem.address,
+			       (unsigned long)umem_odp->umem.length, &end))
+		return -EOVERFLOW;
+	end = ALIGN(end, page_size);
+	if (unlikely(end < page_size))
+		return -EOVERFLOW;
+
+	ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
+				(end - start) >> PAGE_SHIFT,
+				1 << umem_odp->page_shift);
+	if (ret)
+		return ret;
+
+	ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+					   umem_odp->umem.owning_mm, start,
+					   end - start, ops);
+	if (ret)
+		goto out_free_map;
 
 	return 0;
 
@@ -106,7 +109,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
 {
 	struct ib_umem *umem;
 	struct ib_umem_odp *umem_odp;
-	int ret;
 
 	if (access & IB_ACCESS_HUGETLB)
 		return ERR_PTR(-EINVAL);
@@ -118,16 +120,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
 	umem->ibdev = device;
 	umem->writable = ib_access_writable(access);
 	umem->owning_mm = current->mm;
-	umem_odp->is_implicit_odp = 1;
 	umem_odp->page_shift = PAGE_SHIFT;
 
 	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
-	ret = ib_init_umem_odp(umem_odp, NULL);
-	if (ret) {
-		put_pid(umem_odp->tgid);
-		kfree(umem_odp);
-		return ERR_PTR(ret);
-	}
+	ib_init_umem_implicit_odp(umem_odp);
 	return umem_odp;
 }
 EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
@@ -248,7 +244,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_umem_odp_get);
 
-void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
 {
 	struct ib_device *dev = umem_odp->umem.ibdev;
 
@@ -258,14 +254,19 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
 	 * It is the driver's responsibility to ensure, before calling us,
 	 * that the hardware will not attempt to access the MR any more.
 	 */
-	if (!umem_odp->is_implicit_odp) {
-		mutex_lock(&umem_odp->umem_mutex);
-		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
-					    ib_umem_end(umem_odp));
-		mutex_unlock(&umem_odp->umem_mutex);
-		mmu_interval_notifier_remove(&umem_odp->notifier);
-		hmm_dma_map_free(dev->dma_device, &umem_odp->map);
-	}
+	mutex_lock(&umem_odp->umem_mutex);
+	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+				    ib_umem_end(umem_odp));
+	mutex_unlock(&umem_odp->umem_mutex);
+	mmu_interval_notifier_remove(&umem_odp->notifier);
+	hmm_dma_map_free(dev->dma_device, &umem_odp->map);
+}
+
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+{
+	if (!umem_odp->is_implicit_odp)
+		ib_umem_odp_free(umem_odp);
+
 	put_pid(umem_odp->tgid);
 	kfree(umem_odp);
 }
-- 
2.49.0