rxe_mr_copy() is used widely to copy data to/from a user MR: the
requester uses it to load the payloads of request packets; the
responder uses it to process Send, Write, and Read operations; and the
completer uses it to copy data from the response packets of Read and
Atomic operations to a user MR.
Allow these operations to be used with ODP by adding a subordinate
function rxe_odp_mr_copy(), which consists of the following steps:
1. Check page presence and R/W permission.
2. If the pages are present with the required permission, copy the
   data to/from them and return.
3. Otherwise, trigger a page fault to map the pages.
4. Update the MR xarray using the PFNs in umem_odp->pfn_list.
5. Copy the data to/from the pages.
Signed-off-by: Daisuke Matsuda <matsuda-daisuke@fujitsu.com>
---
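For reviewers: the flow of steps 1-5 above can be modeled in plain
userspace C. The sketch below is illustrative only (map[], need_fault(),
and fault_in() exist nowhere in the tree); it shows how a permission bit
folded into the low bits of a page pointer drives the present/fault
decision before the copy, which is the scheme the MR xarray uses.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WRITABLE_BIT 1UL

static char pages[4][4096];	/* backing "pages" */
static uintptr_t map[4];	/* tagged entries; 0 means not present */

/* Step 1: is the page present and, for writes, tagged writable? */
static bool need_fault(int idx, bool for_write)
{
	return !map[idx] || (for_write && !(map[idx] & WRITABLE_BIT));
}

/* Steps 3 and 4: "fault" the page in and record it with its permission. */
static void fault_in(int idx, bool for_write)
{
	map[idx] = (uintptr_t)pages[idx] | (for_write ? WRITABLE_BIT : 0);
}

int main(void)
{
	const char *payload = "hello";
	int idx = 2;

	if (need_fault(idx, true))
		fault_in(idx, true);

	/* Steps 2 and 5: copy through the untagged pointer. */
	memcpy((void *)(map[idx] & ~WRITABLE_BIT), payload,
	       strlen(payload) + 1);
	printf("%s\n", pages[idx]);
	return 0;
}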
drivers/infiniband/sw/rxe/rxe.c | 10 ++++
drivers/infiniband/sw/rxe/rxe_loc.h | 8 ++++
drivers/infiniband/sw/rxe/rxe_mr.c | 9 +++-
drivers/infiniband/sw/rxe/rxe_odp.c | 73 +++++++++++++++++++++++++++++
4 files changed, 98 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 3ca73f8d96cc..ea643ebf9667 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -81,6 +81,16 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
/* IB_ODP_SUPPORT_IMPLICIT is not supported right now. */
rxe->attr.odp_caps.general_caps |= IB_ODP_SUPPORT;
+
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_RECV;
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
}
}
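With these capability bits set, userspace can see ODP support through
the standard verbs query path. A minimal libibverbs sketch (device and
context setup plus error handling omitted); the APIs and flags used
here are the stock libibverbs ones, nothing below is added by this
patch:

#include <infiniband/verbs.h>
#include <stdio.h>

static void print_odp_caps(struct ibv_context *ctx)
{
	struct ibv_device_attr_ex attr;

	if (ibv_query_device_ex(ctx, NULL, &attr))
		return;

	if (attr.odp_caps.general_caps & IBV_ODP_SUPPORT)
		printf("ODP: supported\n");
	if (attr.odp_caps.per_transport_caps.rc_odp_caps & IBV_ODP_SUPPORT_WRITE)
		printf("ODP: RC RDMA Write\n");
	if (attr.odp_caps.per_transport_caps.rc_odp_caps & IBV_ODP_SUPPORT_READ)
		printf("ODP: RC RDMA Read\n");
	if (attr.odp_caps.per_transport_caps.ud_odp_caps & IBV_ODP_SUPPORT_SEND)
		printf("ODP: UD Send\n");
}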
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 51b77e8827aa..2483e90a5443 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -193,6 +193,8 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
u64 iova, int access_flags, struct rxe_mr *mr);
+int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum rxe_mr_copy_dir dir);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -200,6 +202,12 @@ rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
{
return -EOPNOTSUPP;
}
+static inline int
+rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ int length, enum rxe_mr_copy_dir dir)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 5589314a1e67..eef3976309eb 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -247,7 +247,12 @@ int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
void *va;
while (length) {
- page = xa_load(&mr->page_list, index);
+ if (mr->umem->is_odp)
+ page = xa_untag_pointer(xa_load(&mr->page_list,
+ index));
+ else
+ page = xa_load(&mr->page_list, index);
+
if (!page)
return -EFAULT;
@@ -319,7 +324,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
}
if (mr->umem->is_odp)
- return -EOPNOTSUPP;
+ return rxe_odp_mr_copy(mr, iova, addr, length, dir);
else
return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
}
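The xa_untag_pointer() added above is needed because, for ODP MRs, each
mr->page_list entry carries its access permission in the low bits of
the page pointer. A sketch of the round trip; the store side is an
assumption about the ODP fault path (it is not part of this hunk),
while the load side matches rxe_mr_copy_xarray() above:

/* Store side (assumed): fold the writable flag into the entry. */
entry = xa_tag_pointer(page, writable ? RXE_ODP_WRITABLE_BIT : 0);
__xa_store(&mr->page_list, index, entry, GFP_ATOMIC);

/* Load side: strip the tag before dereferencing the page pointer;
 * xa_pointer_tag() recovers the permission for checks.
 */
page = xa_untag_pointer(xa_load(&mr->page_list, index));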
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index c5e24901c141..979af279cf36 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -177,3 +177,76 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
return err;
}
+
+/* The caller must hold the xarray spinlock (mr->page_list.xa_lock). */
+static inline bool rxe_odp_check_pages(struct rxe_mr *mr, u64 iova,
+ int length, u32 flags)
+{
+ unsigned long upper = rxe_mr_iova_to_index(mr, iova + length - 1);
+ unsigned long lower = rxe_mr_iova_to_index(mr, iova);
+ bool need_fault = false;
+	void *page;
+ size_t perm = 0;
+
+	XA_STATE(xas, &mr->page_list, lower);
+
+	if (!(flags & RXE_PAGEFAULT_RDONLY))
+		perm = RXE_ODP_WRITABLE_BIT;
+
+ while (xas.xa_index <= upper) {
+ page = xas_load(&xas);
+
+ /* Check page presence and write permission */
+ if (!page || (perm && !(xa_pointer_tag(page) & perm))) {
+ need_fault = true;
+ break;
+ }
+		xas_next(&xas);
+ }
+
+ return need_fault;
+}
+
+int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum rxe_mr_copy_dir dir)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ u32 flags = 0;
+ int err;
+
+ if (unlikely(!mr->umem->is_odp))
+ return -EOPNOTSUPP;
+
+ switch (dir) {
+ case RXE_TO_MR_OBJ:
+ break;
+
+ case RXE_FROM_MR_OBJ:
+ flags = RXE_PAGEFAULT_RDONLY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock(&mr->page_list.xa_lock);
+
+ if (rxe_odp_check_pages(mr, iova, length, flags)) {
+ spin_unlock(&mr->page_list.xa_lock);
+
+ /* umem_mutex is locked on success */
+ err = rxe_odp_do_pagefault_and_lock(mr, iova, length, flags);
+ if (err < 0)
+ return err;
+
+ /* spinlock to prevent page invalidation */
+ spin_lock(&mr->page_list.xa_lock);
+ mutex_unlock(&umem_odp->umem_mutex);
+ }
+
+ err = rxe_mr_copy_xarray(mr, iova, addr, length, dir);
+
+ spin_unlock(&mr->page_list.xa_lock);
+
+ return err;
+}
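The lock choreography here relies on the invalidation side taking the
same xa_lock before clearing entries, so holding it across the copy
keeps the mapping stable. A hypothetical sketch of that counterpart
(the function name and body are illustrative only, not part of this
patch):

static void rxe_odp_invalidate_range(struct rxe_mr *mr,
				     unsigned long first,
				     unsigned long last)
{
	unsigned long index;

	/* Serializes against rxe_odp_mr_copy(): entries cannot be
	 * cleared while the copy path holds mr->page_list.xa_lock.
	 */
	spin_lock(&mr->page_list.xa_lock);
	for (index = first; index <= last; index++)
		__xa_erase(&mr->page_list, index);
	spin_unlock(&mr->page_list.xa_lock);
}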
--
2.43.0