DPDK patches and discussions
 help / color / mirror / Atom feed
From: Michael Baum <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
	Raslan Darawsheh <rasland@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [RFC 13/21] vdpa/mlx5: use context device structure
Date: Tue, 17 Aug 2021 16:44:33 +0300	[thread overview]
Message-ID: <20210817134441.1966618-14-michaelba@nvidia.com> (raw)
In-Reply-To: <20210817134441.1966618-1-michaelba@nvidia.com>

Use the common context device structure as the priv field.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 185 ++++------------------------
 drivers/vdpa/mlx5/mlx5_vdpa.h       |   4 +-
 drivers/vdpa/mlx5/mlx5_vdpa_event.c |  19 +--
 drivers/vdpa/mlx5/mlx5_vdpa_lm.c    |   6 +-
 drivers/vdpa/mlx5/mlx5_vdpa_mem.c   |  13 +-
 drivers/vdpa/mlx5/mlx5_vdpa_steer.c |  10 +-
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  16 +--
 7 files changed, 61 insertions(+), 192 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 6d17d7a6f3..f773ac8711 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -189,37 +189,6 @@ mlx5_vdpa_features_set(int vid)
 	return 0;
 }
 
-static int
-mlx5_vdpa_pd_create(struct mlx5_vdpa_priv *priv)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
-	if (priv->pd == NULL) {
-		DRV_LOG(ERR, "Failed to allocate PD.");
-		return errno ? -errno : -ENOMEM;
-	}
-	struct mlx5dv_obj obj;
-	struct mlx5dv_pd pd_info;
-	int ret = 0;
-
-	obj.pd.in = priv->pd;
-	obj.pd.out = &pd_info;
-	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
-	if (ret) {
-		DRV_LOG(ERR, "Fail to get PD object info.");
-		mlx5_glue->dealloc_pd(priv->pd);
-		priv->pd = NULL;
-		return -errno;
-	}
-	priv->pdn = pd_info.pdn;
-	return 0;
-#else
-	(void)priv;
-	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
-	return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
 static int
 mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)
 {
@@ -238,7 +207,8 @@ mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(DEBUG, "Vhost MTU is 0.");
 		return ret;
 	}
-	ret = mlx5_get_ifname_sysfs(priv->ctx->device->ibdev_path,
+	ret = mlx5_get_ifname_sysfs(mlx5_os_get_ctx_device_path
+							   (priv->dev_ctx->ctx),
 				    request.ifr_name);
 	if (ret) {
 		DRV_LOG(DEBUG, "Cannot get kernel IF name - %d.", ret);
@@ -289,10 +259,6 @@ mlx5_vdpa_dev_close(int vid)
 	mlx5_vdpa_virtqs_release(priv);
 	mlx5_vdpa_event_qp_global_release(priv);
 	mlx5_vdpa_mem_dereg(priv);
-	if (priv->pd) {
-		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
-		priv->pd = NULL;
-	}
 	priv->configured = 0;
 	priv->vid = 0;
 	/* The mutex may stay locked after event thread cancel - initiate it. */
@@ -320,8 +286,7 @@ mlx5_vdpa_dev_config(int vid)
 	if (mlx5_vdpa_mtu_set(priv))
 		DRV_LOG(WARNING, "MTU cannot be set on device %s.",
 				vdev->device->name);
-	if (mlx5_vdpa_pd_create(priv) || mlx5_vdpa_mem_register(priv) ||
-	    mlx5_vdpa_err_event_setup(priv) ||
+	if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_err_event_setup(priv) ||
 	    mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
 	    mlx5_vdpa_cqe_event_setup(priv)) {
 		mlx5_vdpa_dev_close(vid);
@@ -343,7 +308,7 @@ mlx5_vdpa_get_device_fd(int vid)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -EINVAL;
 	}
-	return priv->ctx->cmd_fd;
+	return ((struct ibv_context *)priv->dev_ctx->ctx)->cmd_fd;
 }
 
 static int
@@ -472,98 +437,6 @@ static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
 	.reset_stats = mlx5_vdpa_reset_stats,
 };
 
-/* Try to disable ROCE by Netlink\Devlink. */
-static int
-mlx5_vdpa_nl_roce_disable(const char *addr)
-{
-	int nlsk_fd = mlx5_nl_init(NETLINK_GENERIC);
-	int devlink_id;
-	int enable;
-	int ret;
-
-	if (nlsk_fd < 0)
-		return nlsk_fd;
-	devlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);
-	if (devlink_id < 0) {
-		ret = devlink_id;
-		DRV_LOG(DEBUG, "Failed to get devlink id for ROCE operations by"
-			" Netlink.");
-		goto close;
-	}
-	ret = mlx5_nl_enable_roce_get(nlsk_fd, devlink_id, addr, &enable);
-	if (ret) {
-		DRV_LOG(DEBUG, "Failed to get ROCE enable by Netlink: %d.",
-			ret);
-		goto close;
-	} else if (!enable) {
-		DRV_LOG(INFO, "ROCE has already disabled(Netlink).");
-		goto close;
-	}
-	ret = mlx5_nl_enable_roce_set(nlsk_fd, devlink_id, addr, 0);
-	if (ret)
-		DRV_LOG(DEBUG, "Failed to disable ROCE by Netlink: %d.", ret);
-	else
-		DRV_LOG(INFO, "ROCE is disabled by Netlink successfully.");
-close:
-	close(nlsk_fd);
-	return ret;
-}
-
-/* Try to disable ROCE by sysfs. */
-static int
-mlx5_vdpa_sys_roce_disable(const char *addr)
-{
-	FILE *file_o;
-	int enable;
-	int ret;
-
-	MKSTR(file_p, "/sys/bus/pci/devices/%s/roce_enable", addr);
-	file_o = fopen(file_p, "rb");
-	if (!file_o) {
-		rte_errno = ENOTSUP;
-		return -ENOTSUP;
-	}
-	ret = fscanf(file_o, "%d", &enable);
-	if (ret != 1) {
-		rte_errno = EINVAL;
-		ret = EINVAL;
-		goto close;
-	} else if (!enable) {
-		ret = 0;
-		DRV_LOG(INFO, "ROCE has already disabled(sysfs).");
-		goto close;
-	}
-	fclose(file_o);
-	file_o = fopen(file_p, "wb");
-	if (!file_o) {
-		rte_errno = ENOTSUP;
-		return -ENOTSUP;
-	}
-	fprintf(file_o, "0\n");
-	ret = 0;
-close:
-	if (ret)
-		DRV_LOG(DEBUG, "Failed to disable ROCE by sysfs: %d.", ret);
-	else
-		DRV_LOG(INFO, "ROCE is disabled by sysfs successfully.");
-	fclose(file_o);
-	return ret;
-}
-
-static int
-mlx5_vdpa_roce_disable(struct rte_device *dev)
-{
-	char pci_addr[PCI_PRI_STR_SIZE] = { 0 };
-
-	if (mlx5_dev_to_pci_str(dev, pci_addr, sizeof(pci_addr)) < 0)
-		return -rte_errno;
-	/* Firstly try to disable ROCE by Netlink and fallback to sysfs. */
-	if (mlx5_vdpa_nl_roce_disable(pci_addr) != 0 &&
-	    mlx5_vdpa_sys_roce_disable(pci_addr) != 0)
-		return -rte_errno;
-	return 0;
-}
-
 static int
 mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)
 {
@@ -632,39 +505,26 @@ mlx5_vdpa_config_get(struct rte_devargs *devargs, struct mlx5_vdpa_priv *priv)
 static int
 mlx5_vdpa_dev_probe(struct rte_device *dev)
 {
-	struct ibv_device *ibv;
 	struct mlx5_vdpa_priv *priv = NULL;
-	struct ibv_context *ctx = NULL;
+	struct mlx5_dev_ctx *dev_ctx = NULL;
 	struct mlx5_hca_attr attr;
-	int retry;
 	int ret;
 
-	if (mlx5_vdpa_roce_disable(dev) != 0) {
-		DRV_LOG(WARNING, "Failed to disable ROCE for \"%s\".",
-			dev->name);
-		return -rte_errno;
-	}
-	/* Wait for the IB device to appear again after reload. */
-	for (retry = MLX5_VDPA_MAX_RETRIES; retry > 0; --retry) {
-		ibv = mlx5_os_get_ibv_dev(dev);
-		if (ibv != NULL)
-			break;
-		usleep(MLX5_VDPA_USEC);
-	}
-	if (ibv == NULL) {
-		DRV_LOG(ERR, "Cannot get IB device after disabling RoCE for "
-				"\"%s\", retries exceed %d.",
-				dev->name, MLX5_VDPA_MAX_RETRIES);
-		rte_errno = EAGAIN;
+	dev_ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx),
+			      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (dev_ctx == NULL) {
+		DRV_LOG(ERR, "Device context allocation failure.");
+		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
-	ctx = mlx5_glue->dv_open_device(ibv);
-	if (!ctx) {
-		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
+	ret = mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_VDPA);
+	if (ret < 0) {
+		DRV_LOG(ERR, "Failed to create device context.");
+		mlx5_free(dev_ctx);
 		rte_errno = ENODEV;
 		return -rte_errno;
 	}
-	ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
+	ret = mlx5_devx_cmd_query_hca_attr(dev_ctx->ctx, &attr);
 	if (ret) {
 		DRV_LOG(ERR, "Unable to read HCA capabilities.");
 		rte_errno = ENOTSUP;
@@ -692,8 +552,8 @@ mlx5_vdpa_dev_probe(struct rte_device *dev)
 	priv->qp_ts_format = attr.qp_ts_format;
 	if (attr.num_lag_ports == 0)
 		priv->num_lag_ports = 1;
-	priv->ctx = ctx;
-	priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
+	priv->dev_ctx = dev_ctx;
+	priv->var = mlx5_glue->dv_alloc_var(dev_ctx->ctx, 0);
 	if (!priv->var) {
 		DRV_LOG(ERR, "Failed to allocate VAR %u.", errno);
 		goto error;
@@ -718,8 +578,10 @@ mlx5_vdpa_dev_probe(struct rte_device *dev)
 			mlx5_glue->dv_free_var(priv->var);
 		rte_free(priv);
 	}
-	if (ctx)
-		mlx5_glue->close_device(ctx);
+	if (dev_ctx) {
+		mlx5_dev_ctx_release(dev_ctx);
+		mlx5_free(dev_ctx);
+	}
 	return -rte_errno;
 }
 
@@ -748,7 +610,10 @@ mlx5_vdpa_dev_remove(struct rte_device *dev)
 		}
 		if (priv->vdev)
 			rte_vdpa_unregister_device(priv->vdev);
-		mlx5_glue->close_device(priv->ctx);
+		if (priv->dev_ctx) {
+			mlx5_dev_ctx_release(priv->dev_ctx);
+			mlx5_free(priv->dev_ctx);
+		}
 		pthread_mutex_destroy(&priv->vq_config_lock);
 		rte_free(priv);
 	}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 2a04e36607..dc9ba1c3c2 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -132,10 +132,8 @@ struct mlx5_vdpa_priv {
 	uint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */
 	struct rte_vdpa_device *vdev; /* vDPA device. */
 	int vid; /* vhost device id. */
-	struct ibv_context *ctx; /* Device context. */
+	struct mlx5_dev_ctx *dev_ctx; /* Device context. */
 	struct mlx5_hca_vdpa_attr caps;
-	uint32_t pdn; /* Protection Domain number. */
-	struct ibv_pd *pd;
 	uint32_t gpa_mkey_index;
 	struct ibv_mr *null_mr;
 	struct rte_vhost_memory *vmem;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
index 3541c652ce..056a3c2936 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
@@ -48,7 +48,7 @@ mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
 {
 	if (priv->eventc)
 		return 0;
-	priv->eventc = mlx5_os_devx_create_event_channel(priv->ctx,
+	priv->eventc = mlx5_os_devx_create_event_channel(priv->dev_ctx->ctx,
 			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
 	if (!priv->eventc) {
 		rte_errno = errno;
@@ -61,7 +61,7 @@ mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
 	 * registers writings, it is safe to allocate UAR with any
 	 * memory mapping type.
 	 */
-	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
+	priv->uar = mlx5_devx_alloc_uar(priv->dev_ctx->ctx, -1);
 	if (!priv->uar) {
 		rte_errno = errno;
 		DRV_LOG(ERR, "Failed to allocate UAR.");
@@ -115,8 +115,8 @@ mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
 	uint16_t event_nums[1] = {0};
 	int ret;
 
-	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, log_desc_n, &attr,
-				  SOCKET_ID_ANY);
+	ret = mlx5_devx_cq_create(priv->dev_ctx->ctx, &cq->cq_obj, log_desc_n,
+				  &attr, SOCKET_ID_ANY);
 	if (ret)
 		goto error;
 	cq->cq_ci = 0;
@@ -397,7 +397,8 @@ mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
 	int flags;
 
 	/* Setup device event channel. */
-	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
+	priv->err_chnl =
+		mlx5_glue->devx_create_event_channel(priv->dev_ctx->ctx, 0);
 	if (!priv->err_chnl) {
 		rte_errno = errno;
 		DRV_LOG(ERR, "Failed to create device event channel %d.",
@@ -598,9 +599,9 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 		return -1;
 	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
 		return -1;
-	attr.pd = priv->pdn;
+	attr.pd = priv->dev_ctx->pdn;
 	attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
-	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
+	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->dev_ctx->ctx, &attr);
 	if (!eqp->fw_qp) {
 		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
 		goto error;
@@ -611,7 +612,7 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
+	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->dev_ctx->ctx,
 					       (void *)(uintptr_t)eqp->umem_buf,
 					       umem_size,
 					       IBV_ACCESS_LOCAL_WRITE);
@@ -631,7 +632,7 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 	attr.dbr_umem_id = eqp->umem_obj->umem_id;
 	attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
 	attr.dbr_address = RTE_BIT64(log_desc_n) * MLX5_WSEG_SIZE;
-	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
+	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->dev_ctx->ctx, &attr);
 	if (!eqp->sw_qp) {
 		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
 		goto error;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index f391813745..1e9a946708 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -39,7 +39,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 	struct mlx5_devx_mkey_attr mkey_attr = {
 			.addr = (uintptr_t)log_base,
 			.size = log_size,
-			.pd = priv->pdn,
+			.pd = priv->dev_ctx->pdn,
 			.pg_access = 1,
 	};
 	struct mlx5_devx_virtq_attr attr = {
@@ -54,7 +54,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 		DRV_LOG(ERR, "Failed to allocate mem for lm mr.");
 		return -1;
 	}
-	mr->umem = mlx5_glue->devx_umem_reg(priv->ctx,
+	mr->umem = mlx5_glue->devx_umem_reg(priv->dev_ctx->ctx,
 					    (void *)(uintptr_t)log_base,
 					    log_size, IBV_ACCESS_LOCAL_WRITE);
 	if (!mr->umem) {
@@ -62,7 +62,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 		goto err;
 	}
 	mkey_attr.umem_id = mr->umem->umem_id;
-	mr->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
+	mr->mkey = mlx5_devx_cmd_mkey_create(priv->dev_ctx->ctx, &mkey_attr);
 	if (!mr->mkey) {
 		DRV_LOG(ERR, "Failed to create Mkey for lm.");
 		goto err;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index a13bde5a0b..bec83eddde 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -193,7 +193,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
 	if (!mem)
 		return -rte_errno;
 	priv->vmem = mem;
-	priv->null_mr = mlx5_glue->alloc_null_mr(priv->pd);
+	priv->null_mr = mlx5_glue->alloc_null_mr(priv->dev_ctx->pd);
 	if (!priv->null_mr) {
 		DRV_LOG(ERR, "Failed to allocate null MR.");
 		ret = -errno;
@@ -209,7 +209,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
 			DRV_LOG(ERR, "Failed to allocate mem entry memory.");
 			goto error;
 		}
-		entry->umem = mlx5_glue->devx_umem_reg(priv->ctx,
+		entry->umem = mlx5_glue->devx_umem_reg(priv->dev_ctx->ctx,
 					 (void *)(uintptr_t)reg->host_user_addr,
 					     reg->size, IBV_ACCESS_LOCAL_WRITE);
 		if (!entry->umem) {
@@ -220,9 +220,10 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
 		mkey_attr.addr = (uintptr_t)(reg->guest_phys_addr);
 		mkey_attr.size = reg->size;
 		mkey_attr.umem_id = entry->umem->umem_id;
-		mkey_attr.pd = priv->pdn;
+		mkey_attr.pd = priv->dev_ctx->pdn;
 		mkey_attr.pg_access = 1;
-		entry->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
+		entry->mkey = mlx5_devx_cmd_mkey_create(priv->dev_ctx->ctx,
+							&mkey_attr);
 		if (!entry->mkey) {
 			DRV_LOG(ERR, "Failed to create direct Mkey.");
 			ret = -rte_errno;
@@ -267,7 +268,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
 	}
 	mkey_attr.addr = (uintptr_t)(mem->regions[0].guest_phys_addr);
 	mkey_attr.size = mem_size;
-	mkey_attr.pd = priv->pdn;
+	mkey_attr.pd = priv->dev_ctx->pdn;
 	mkey_attr.umem_id = 0;
 	/* Must be zero for KLM mode. */
 	mkey_attr.log_entity_size = mode == MLX5_MKC_ACCESS_MODE_KLM_FBS ?
@@ -281,7 +282,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
 		ret = -ENOMEM;
 		goto error;
 	}
-	entry->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
+	entry->mkey = mlx5_devx_cmd_mkey_create(priv->dev_ctx->ctx, &mkey_attr);
 	if (!entry->mkey) {
 		DRV_LOG(ERR, "Failed to create indirect Mkey.");
 		ret = -rte_errno;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index 383f003966..ae2ca9ccac 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -98,7 +98,8 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 	attr->rqt_max_size = rqt_n;
 	attr->rqt_actual_size = rqt_n;
 	if (!priv->steer.rqt) {
-		priv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->ctx, attr);
+		priv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->dev_ctx->ctx,
+							   attr);
 		if (!priv->steer.rqt) {
 			DRV_LOG(ERR, "Failed to create RQT.");
 			ret = -rte_errno;
@@ -116,6 +117,7 @@ static int __rte_unused
 mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 {
 #ifdef HAVE_MLX5DV_DR
+	struct ibv_context *ctx = priv->dev_ctx->ctx;
 	struct mlx5_devx_tir_attr tir_att = {
 		.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT,
 		.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ,
@@ -204,12 +206,12 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 		tir_att.rx_hash_field_selector_outer.selected_fields =
 								  vars[i][HASH];
 		priv->steer.rss[i].matcher = mlx5_glue->dv_create_flow_matcher
-					 (priv->ctx, &dv_attr, priv->steer.tbl);
+					       (ctx, &dv_attr, priv->steer.tbl);
 		if (!priv->steer.rss[i].matcher) {
 			DRV_LOG(ERR, "Failed to create matcher %d.", i);
 			goto error;
 		}
-		priv->steer.rss[i].tir = mlx5_devx_cmd_create_tir(priv->ctx,
+		priv->steer.rss[i].tir = mlx5_devx_cmd_create_tir(ctx,
 								  &tir_att);
 		if (!priv->steer.rss[i].tir) {
 			DRV_LOG(ERR, "Failed to create TIR %d.", i);
@@ -268,7 +270,7 @@ int
 mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
 {
 #ifdef HAVE_MLX5DV_DR
-	priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,
+	priv->steer.domain = mlx5_glue->dr_create_domain(priv->dev_ctx->ctx,
 						  MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
 	if (!priv->steer.domain) {
 		DRV_LOG(ERR, "Failed to create Rx domain.");
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index f530646058..d7c2d70947 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -250,7 +250,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 	if (priv->caps.queue_counters_valid) {
 		if (!virtq->counters)
 			virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
-								(priv->ctx);
+							   (priv->dev_ctx->ctx);
 		if (!virtq->counters) {
 			DRV_LOG(ERR, "Failed to create virtq couners for virtq"
 				" %d.", index);
@@ -269,7 +269,8 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 				" %u.", i, index);
 			goto error;
 		}
-		virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->ctx,
+		virtq->umems[i].obj = mlx5_glue->devx_umem_reg
+						       (priv->dev_ctx->ctx,
 							virtq->umems[i].buf,
 							virtq->umems[i].size,
 							IBV_ACCESS_LOCAL_WRITE);
@@ -322,11 +323,11 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 	attr.mkey = priv->gpa_mkey_index;
 	attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
 	attr.queue_index = index;
-	attr.pd = priv->pdn;
+	attr.pd = priv->dev_ctx->pdn;
 	attr.hw_latency_mode = priv->hw_latency_mode;
 	attr.hw_max_latency_us = priv->hw_max_latency_us;
 	attr.hw_max_pending_comp = priv->hw_max_pending_comp;
-	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
+	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->dev_ctx->ctx, &attr);
 	virtq->priv = priv;
 	if (!virtq->virtq)
 		goto error;
@@ -434,6 +435,7 @@ int
 mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 {
 	struct mlx5_devx_tis_attr tis_attr = {0};
+	struct ibv_context *ctx = priv->dev_ctx->ctx;
 	uint32_t i;
 	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
 	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
@@ -457,7 +459,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 	}
 	/* Always map the entire page. */
 	priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
-				   PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,
+				   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,
 				   priv->var->mmap_off);
 	if (priv->virtq_db_addr == MAP_FAILED) {
 		DRV_LOG(ERR, "Failed to map doorbell page %u.", errno);
@@ -467,7 +469,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
 			priv->virtq_db_addr);
 	}
-	priv->td = mlx5_devx_cmd_create_td(priv->ctx);
+	priv->td = mlx5_devx_cmd_create_td(ctx);
 	if (!priv->td) {
 		DRV_LOG(ERR, "Failed to create transport domain.");
 		return -rte_errno;
@@ -476,7 +478,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 	for (i = 0; i < priv->num_lag_ports; i++) {
 		/* 0 is auto affinity, non-zero value to propose port. */
 		tis_attr.lag_tx_port_affinity = i + 1;
-		priv->tiss[i] = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
+		priv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
 		if (!priv->tiss[i]) {
 			DRV_LOG(ERR, "Failed to create TIS %u.", i);
 			goto error;
-- 
2.25.1


  parent reply	other threads:[~2021-08-17 13:47 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-08-17 13:44 [dpdk-dev] [RFC 00/21] mlx5: sharing global MR cache between drivers Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 01/21] net/mlx5: fix shared device context creation error flow Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 02/21] net/mlx5: fix PCI probing " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 03/21] common/mlx5: add context device structure Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 04/21] compress/mlx5: use " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 05/21] crypto/mlx5: " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 06/21] regex/mlx5: " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 07/21] net/mlx5: improve probe function on Windows Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 08/21] net/mlx5: improve probe function on Linux Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 09/21] net/mlx5: improve spawn function Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 10/21] net/mlx5: use context device structure Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 11/21] net/mlx5: move NUMA node field to context device Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 12/21] common/mlx5: add ROCE disable in context device creation Michael Baum
2021-08-17 13:44 ` Michael Baum [this message]
2021-08-17 13:44 ` [dpdk-dev] [RFC 14/21] mlx5: update device sent to probing Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 15/21] mlx5: share context device structure between drivers Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 16/21] common/mlx5: add HCA attributes to context device structure Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 17/21] regex/mlx5: use HCA attributes from context device Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 18/21] vdpa/mlx5: " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 19/21] compress/mlx5: " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 20/21] crypto/mlx5: " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 21/21] net/mlx5: " Michael Baum

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210817134441.1966618-14-michaelba@nvidia.com \
    --to=michaelba@nvidia.com \
    --cc=dev@dpdk.org \
    --cc=matan@nvidia.com \
    --cc=rasland@nvidia.com \
    --cc=viacheslavo@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for the NNTP newsgroup(s).