[PATCH rdma-next 4/6] RDMA/mlx5: Add TLP VAR region support and infrastructure

Leon Romanovsky posted 6 patches 1 month, 1 week ago
[PATCH rdma-next 4/6] RDMA/mlx5: Add TLP VAR region support and infrastructure
Posted by Leon Romanovsky 1 month, 1 week ago
From: Maher Sanalla <msanalla@nvidia.com>

Add support for TLP (Transaction Layer Packet) VAR regions used by
software-defined device emulation. A TLP VAR region provides dedicated
response gateways for sending TLP responses back to the host in TLP
emulation scenarios.

Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/infiniband/hw/mlx5/main.c    | 57 ++++++++++++++++++++++++++++++++----
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  2 ++
 2 files changed, 54 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 835fe2a95ad6..424426a2cd76 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2518,6 +2518,15 @@ mlx5_ib_pgoff_to_mmap_entry(struct ib_ucontext *ucontext, off_t pg_off)
 	return rdma_user_mmap_entry_get_pgoff(ucontext, entry_pgoff);
 }
 
+static void mlx5_ib_free_var_mmap_entry(struct mlx5_user_mmap_entry *mentry,
+					struct mlx5_var_region *var_region)
+{
+	mutex_lock(&var_region->bitmap_lock);
+	clear_bit(mentry->page_idx, var_region->bitmap);
+	mutex_unlock(&var_region->bitmap_lock);
+	kfree(mentry);
+}
+
 static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
 {
 	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
@@ -2533,10 +2542,11 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
 		break;
 	case MLX5_IB_MMAP_TYPE_VAR:
 		var_region = &var_table->var_region;
-		mutex_lock(&var_region->bitmap_lock);
-		clear_bit(mentry->page_idx, var_region->bitmap);
-		mutex_unlock(&var_region->bitmap_lock);
-		kfree(mentry);
+		mlx5_ib_free_var_mmap_entry(mentry, var_region);
+		break;
+	case MLX5_IB_MMAP_TYPE_TLP_VAR:
+		var_region = &var_table->tlp_var_region;
+		mlx5_ib_free_var_mmap_entry(mentry, var_region);
 		break;
 	case MLX5_IB_MMAP_TYPE_UAR_WC:
 	case MLX5_IB_MMAP_TYPE_UAR_NC:
@@ -2687,6 +2697,7 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
 	mentry = to_mmmap(entry);
 	pfn = (mentry->address >> PAGE_SHIFT);
 	if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
+	    mentry->mmap_flag == MLX5_IB_MMAP_TYPE_TLP_VAR ||
 	    mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
 		prot = pgprot_noncached(vma->vm_page_prot);
 	else
@@ -4636,6 +4647,28 @@ static int mlx5_ib_init_var_region(struct mlx5_ib_dev *dev)
 	return (var_region->bitmap) ? 0 : -ENOMEM;
 }
 
+static int mlx5_ib_init_tlp_var_region(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_var_region *var_region = &dev->var_table.tlp_var_region;
+	struct mlx5_core_dev *mdev = dev->mdev;
+	u8 log_tlp_var_stride;
+
+	log_tlp_var_stride =
+		MLX5_CAP_DEV_TLP_EMULATION(mdev, log_tlp_rsp_gw_page_stride);
+	var_region->hw_start_addr =
+		dev->mdev->bar_addr +
+		MLX5_CAP64_DEV_TLP_EMULATION(mdev, tlp_rsp_gw_pages_bar_offset);
+
+	var_region->stride_size = (1ULL << log_tlp_var_stride) * 4096;
+	var_region->num_var_hw_entries =
+		MLX5_CAP_DEV_TLP_EMULATION(mdev, tlp_rsp_gw_num_pages);
+
+	mutex_init(&var_region->bitmap_lock);
+	var_region->bitmap = bitmap_zalloc(var_region->num_var_hw_entries,
+					   GFP_KERNEL);
+	return (var_region->bitmap) ? 0 : -ENOMEM;
+}
+
 static void mlx5_ib_cleanup_ucaps(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
@@ -4671,13 +4704,19 @@ static int mlx5_ib_init_ucaps(struct mlx5_ib_dev *dev)
 	return ret;
 }
 
+static void mlx5_ib_cleanup_var_table(struct mlx5_ib_dev *dev)
+{
+	bitmap_free(dev->var_table.var_region.bitmap);
+	bitmap_free(dev->var_table.tlp_var_region.bitmap);
+}
+
 static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN_2_64(dev->mdev, general_obj_types_127_64) &
 	    MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL)
 		mlx5_ib_cleanup_ucaps(dev);
 
-	bitmap_free(dev->var_table.var_region.bitmap);
+	mlx5_ib_cleanup_var_table(dev);
 }
 
 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
@@ -4737,10 +4776,18 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 			goto err_ucaps;
 	}
 
+	if (MLX5_CAP_GEN(dev->mdev, tlp_device_emulation_manager)) {
+		err = mlx5_ib_init_tlp_var_region(dev);
+		if (err)
+			goto err_tlp_var;
+	}
+
 	dev->ib_dev.use_cq_dim = true;
 
 	return 0;
 
+err_tlp_var:
+	mlx5_ib_cleanup_ucaps(dev);
 err_ucaps:
 	bitmap_free(dev->var_table.var_region.bitmap);
 	return err;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 3d0ae52c68a7..5f789291be93 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -162,6 +162,7 @@ enum mlx5_ib_mmap_type {
 	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
 	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
 	MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
+	MLX5_IB_MMAP_TYPE_TLP_VAR = 6,
 };
 
 struct mlx5_bfreg_info {
@@ -1143,6 +1144,7 @@ struct mlx5_var_region {
 
 struct mlx5_var_table {
 	struct mlx5_var_region var_region;
+	struct mlx5_var_region tlp_var_region;
 };
 
 struct mlx5_port_caps {

-- 
2.53.0