From: Michael Baum <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [RFC 06/21] regex/mlx5: use context device structure
Date: Tue, 17 Aug 2021 16:44:26 +0300 [thread overview]
Message-ID: <20210817134441.1966618-7-michaelba@nvidia.com> (raw)
In-Reply-To: <20210817134441.1966618-1-michaelba@nvidia.com>
Use the common context device structure (mlx5_dev_ctx) as a field of the
regex driver's private structure, replacing the per-driver ibv_context and
protection-domain handling.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
---
drivers/regex/mlx5/mlx5_regex.c | 59 +++++++++++-----------
drivers/regex/mlx5/mlx5_regex.h | 23 +--------
drivers/regex/mlx5/mlx5_regex_control.c | 12 ++---
drivers/regex/mlx5/mlx5_regex_fastpath.c | 18 +++----
drivers/regex/mlx5/mlx5_rxp.c | 64 ++++++++++++------------
5 files changed, 72 insertions(+), 104 deletions(-)
diff --git a/drivers/regex/mlx5/mlx5_regex.c b/drivers/regex/mlx5/mlx5_regex.c
index f17b6df47f..11b24cde39 100644
--- a/drivers/regex/mlx5/mlx5_regex.c
+++ b/drivers/regex/mlx5/mlx5_regex.c
@@ -110,7 +110,8 @@ mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
/* Iterate all the existing mlx5 devices. */
TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)
mlx5_free_mr_by_addr(&priv->mr_scache,
- priv->ctx->device->name,
+ mlx5_os_get_ctx_device_name
+ (priv->dev_ctx->ctx),
addr, len);
pthread_mutex_unlock(&mem_event_list_lock);
break;
@@ -123,25 +124,31 @@ mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
static int
mlx5_regex_dev_probe(struct rte_device *rte_dev)
{
- struct ibv_device *ibv;
struct mlx5_regex_priv *priv = NULL;
- struct ibv_context *ctx = NULL;
+ struct mlx5_dev_ctx *dev_ctx = NULL;
struct mlx5_hca_attr attr;
char name[RTE_REGEXDEV_NAME_MAX_LEN];
+ const char *ibdev_name;
int ret;
uint32_t val;
- ibv = mlx5_os_get_ibv_dev(rte_dev);
- if (ibv == NULL)
+ dev_ctx = rte_zmalloc("mlx5 context device", sizeof(*dev_ctx),
+ RTE_CACHE_LINE_SIZE);
+ if (dev_ctx == NULL) {
+ DRV_LOG(ERR, "Device context allocation failure.");
+ rte_errno = ENOMEM;
return -rte_errno;
- DRV_LOG(INFO, "Probe device \"%s\".", ibv->name);
- ctx = mlx5_glue->dv_open_device(ibv);
- if (!ctx) {
- DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
+ }
+ ret = mlx5_dev_ctx_prepare(dev_ctx, rte_dev, MLX5_CLASS_REGEX);
+ if (ret < 0) {
+ DRV_LOG(ERR, "Failed to create device context.");
+ rte_free(dev_ctx);
rte_errno = ENODEV;
return -rte_errno;
}
- ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
+ ibdev_name = mlx5_os_get_ctx_device_name(dev_ctx->ctx);
+ DRV_LOG(INFO, "Probe device \"%s\".", ibdev_name);
+ ret = mlx5_devx_cmd_query_hca_attr(dev_ctx->ctx, &attr);
if (ret) {
DRV_LOG(ERR, "Unable to read HCA capabilities.");
rte_errno = ENOTSUP;
@@ -152,7 +159,7 @@ mlx5_regex_dev_probe(struct rte_device *rte_dev)
rte_errno = ENOTSUP;
goto dev_error;
}
- if (mlx5_regex_engines_status(ctx, 2)) {
+ if (mlx5_regex_engines_status(dev_ctx->ctx, 2)) {
DRV_LOG(ERR, "RegEx engine error.");
rte_errno = ENOMEM;
goto dev_error;
@@ -165,13 +172,13 @@ mlx5_regex_dev_probe(struct rte_device *rte_dev)
goto dev_error;
}
priv->sq_ts_format = attr.sq_ts_format;
- priv->ctx = ctx;
+ priv->dev_ctx = dev_ctx;
priv->nb_engines = 2; /* attr.regexp_num_of_engines */
- ret = mlx5_devx_regex_register_read(priv->ctx, 0,
+ ret = mlx5_devx_regex_register_read(priv->dev_ctx->ctx, 0,
MLX5_RXP_CSR_IDENTIFIER, &val);
if (ret) {
DRV_LOG(ERR, "CSR read failed!");
- return -1;
+ goto dev_error;
}
if (val == MLX5_RXP_BF2_IDENTIFIER)
priv->is_bf2 = 1;
@@ -189,18 +196,12 @@ mlx5_regex_dev_probe(struct rte_device *rte_dev)
* registers writings, it is safe to allocate UAR with any
* memory mapping type.
*/
- priv->uar = mlx5_devx_alloc_uar(ctx, -1);
+ priv->uar = mlx5_devx_alloc_uar(dev_ctx->ctx, -1);
if (!priv->uar) {
DRV_LOG(ERR, "can't allocate uar.");
rte_errno = ENOMEM;
goto error;
}
- priv->pd = mlx5_glue->alloc_pd(ctx);
- if (!priv->pd) {
- DRV_LOG(ERR, "can't allocate pd.");
- rte_errno = ENOMEM;
- goto error;
- }
priv->regexdev->dev_ops = &mlx5_regexdev_ops;
priv->regexdev->enqueue = mlx5_regexdev_enqueue;
#ifdef HAVE_MLX5_UMR_IMKEY
@@ -238,15 +239,15 @@ mlx5_regex_dev_probe(struct rte_device *rte_dev)
return 0;
error:
- if (priv->pd)
- mlx5_glue->dealloc_pd(priv->pd);
if (priv->uar)
mlx5_glue->devx_free_uar(priv->uar);
if (priv->regexdev)
rte_regexdev_unregister(priv->regexdev);
dev_error:
- if (ctx)
- mlx5_glue->close_device(ctx);
+ if (dev_ctx) {
+ mlx5_dev_ctx_release(dev_ctx);
+ rte_free(dev_ctx);
+ }
if (priv)
rte_free(priv);
return -rte_errno;
@@ -274,14 +275,14 @@ mlx5_regex_dev_remove(struct rte_device *rte_dev)
NULL);
if (priv->mr_scache.cache.table)
mlx5_mr_release_cache(&priv->mr_scache);
- if (priv->pd)
- mlx5_glue->dealloc_pd(priv->pd);
if (priv->uar)
mlx5_glue->devx_free_uar(priv->uar);
if (priv->regexdev)
rte_regexdev_unregister(priv->regexdev);
- if (priv->ctx)
- mlx5_glue->close_device(priv->ctx);
+ if (priv->dev_ctx) {
+ mlx5_dev_ctx_release(priv->dev_ctx);
+ rte_free(priv->dev_ctx);
+ }
rte_free(priv);
}
return 0;
diff --git a/drivers/regex/mlx5/mlx5_regex.h b/drivers/regex/mlx5/mlx5_regex.h
index 514f3408f9..c7a57e6f1b 100644
--- a/drivers/regex/mlx5/mlx5_regex.h
+++ b/drivers/regex/mlx5/mlx5_regex.h
@@ -58,7 +58,7 @@ struct mlx5_regex_db {
struct mlx5_regex_priv {
TAILQ_ENTRY(mlx5_regex_priv) next;
- struct ibv_context *ctx; /* Device context. */
+ struct mlx5_dev_ctx *dev_ctx; /* Device context. */
struct rte_regexdev *regexdev; /* Pointer to the RegEx dev. */
uint16_t nb_queues; /* Number of queues. */
struct mlx5_regex_qp *qps; /* Pointer to the qp array. */
@@ -68,7 +68,6 @@ struct mlx5_regex_priv {
MLX5_RXP_EM_COUNT];
uint32_t nb_engines; /* Number of RegEx engines. */
struct mlx5dv_devx_uar *uar; /* UAR object. */
- struct ibv_pd *pd;
TAILQ_ENTRY(mlx5_regex_priv) mem_event_cb;
/**< Called by memory event callback. */
struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
@@ -77,26 +76,6 @@ struct mlx5_regex_priv {
uint8_t has_umr; /* The device supports UMR. */
};
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-static inline int
-regex_get_pdn(void *pd, uint32_t *pdn)
-{
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret = 0;
-
- obj.pd.in = pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret) {
- DRV_LOG(DEBUG, "Fail to get PD object info");
- return ret;
- }
- *pdn = pd_info.pdn;
- return 0;
-}
-#endif
-
/* mlx5_regex.c */
int mlx5_regex_start(struct rte_regexdev *dev);
int mlx5_regex_stop(struct rte_regexdev *dev);
diff --git a/drivers/regex/mlx5/mlx5_regex_control.c b/drivers/regex/mlx5/mlx5_regex_control.c
index 8ce2dabb55..125425a955 100644
--- a/drivers/regex/mlx5/mlx5_regex_control.c
+++ b/drivers/regex/mlx5/mlx5_regex_control.c
@@ -83,8 +83,8 @@ regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
int ret;
cq->ci = 0;
- ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
- &attr, SOCKET_ID_ANY);
+ ret = mlx5_devx_cq_create(priv->dev_ctx->ctx, &cq->cq_obj,
+ cq->log_nb_desc, &attr, SOCKET_ID_ANY);
if (ret) {
DRV_LOG(ERR, "Can't create CQ object.");
memset(cq, 0, sizeof(*cq));
@@ -147,18 +147,14 @@ regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
.state = MLX5_SQC_STATE_RDY,
};
struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
- uint32_t pd_num = 0;
int ret;
sq->log_nb_desc = log_nb_desc;
sq->sqn = q_ind;
sq->ci = 0;
sq->pi = 0;
- ret = regex_get_pdn(priv->pd, &pd_num);
- if (ret)
- return ret;
- attr.wq_attr.pd = pd_num;
- ret = mlx5_devx_sq_create(priv->ctx, &sq->sq_obj,
+ attr.wq_attr.pd = priv->dev_ctx->pdn;
+ ret = mlx5_devx_sq_create(priv->dev_ctx->ctx, &sq->sq_obj,
MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
&attr, SOCKET_ID_ANY);
if (ret) {
diff --git a/drivers/regex/mlx5/mlx5_regex_fastpath.c b/drivers/regex/mlx5/mlx5_regex_fastpath.c
index 786718af53..2a04713b9f 100644
--- a/drivers/regex/mlx5/mlx5_regex_fastpath.c
+++ b/drivers/regex/mlx5/mlx5_regex_fastpath.c
@@ -138,7 +138,8 @@ mlx5_regex_addr2mr(struct mlx5_regex_priv *priv, struct mlx5_mr_ctrl *mr_ctrl,
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
+ return mlx5_mr_addr2mr_bh(priv->dev_ctx->pd, 0, &priv->mr_scache,
+ mr_ctrl, addr,
!!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
}
@@ -634,7 +635,7 @@ setup_sqs(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *queue)
static int
setup_buffers(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp)
{
- struct ibv_pd *pd = priv->pd;
+ struct ibv_pd *pd = priv->dev_ctx->pd;
uint32_t i;
int err;
@@ -724,6 +725,7 @@ mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id)
.klm_array = &klm,
.klm_num = 1,
.umr_en = 1,
+ .pd = priv->dev_ctx->pdn,
};
uint32_t i;
int err = 0;
@@ -740,19 +742,11 @@ mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id)
setup_sqs(priv, qp);
if (priv->has_umr) {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- if (regex_get_pdn(priv->pd, &attr.pd)) {
- err = -rte_errno;
- DRV_LOG(ERR, "Failed to get pdn.");
- mlx5_regexdev_teardown_fastpath(priv, qp_id);
- return err;
- }
-#endif
for (i = 0; i < qp->nb_desc; i++) {
attr.klm_num = MLX5_REGEX_MAX_KLM_NUM;
attr.klm_array = qp->jobs[i].imkey_array;
- qp->jobs[i].imkey = mlx5_devx_cmd_mkey_create(priv->ctx,
- &attr);
+ qp->jobs[i].imkey = mlx5_devx_cmd_mkey_create
+ (priv->dev_ctx->ctx, &attr);
if (!qp->jobs[i].imkey) {
err = -rte_errno;
DRV_LOG(ERR, "Failed to allocate imkey.");
diff --git a/drivers/regex/mlx5/mlx5_rxp.c b/drivers/regex/mlx5/mlx5_rxp.c
index 380037e24c..7bd854883f 100644
--- a/drivers/regex/mlx5/mlx5_rxp.c
+++ b/drivers/regex/mlx5/mlx5_rxp.c
@@ -167,7 +167,7 @@ rxp_init_rtru(struct mlx5_regex_priv *priv, uint8_t id, uint32_t init_bits)
uint32_t poll_value;
uint32_t expected_value;
uint32_t expected_mask;
- struct ibv_context *ctx = priv->ctx;
+ struct ibv_context *ctx = priv->dev_ctx->ctx;
int ret = 0;
/* Read the rtru ctrl CSR. */
@@ -284,6 +284,7 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
uint32_t rof_rule_addr;
uint64_t tmp_write_swap[4];
struct mlx5_rxp_rof_entry rules[8];
+ struct ibv_context *ctx = priv->dev_ctx->ctx;
int i;
int db_free;
int j;
@@ -313,7 +314,7 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
tmp_addr = rxp_get_reg_address(address);
if (tmp_addr == UINT32_MAX)
goto parse_error;
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(ctx, id,
tmp_addr, ®_val);
if (ret)
goto parse_error;
@@ -337,7 +338,7 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
tmp_addr = rxp_get_reg_address(address);
if (tmp_addr == UINT32_MAX)
goto parse_error;
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(ctx, id,
tmp_addr, ®_val);
if (ret)
goto parse_error;
@@ -359,7 +360,7 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
tmp_addr = rxp_get_reg_address(address);
if (tmp_addr == UINT32_MAX)
goto parse_error;
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(ctx, id,
tmp_addr, ®_val);
if (ret)
goto parse_error;
@@ -395,7 +396,7 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
if (tmp_addr == UINT32_MAX)
goto parse_error;
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(ctx, id,
tmp_addr, ®_val);
if (ret) {
DRV_LOG(ERR, "RXP CSR read failed!");
@@ -418,17 +419,16 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
*/
temp = val;
ret |= mlx5_devx_regex_register_write
- (priv->ctx, id,
+ (ctx, id,
MLX5_RXP_RTRU_CSR_DATA_0, temp);
temp = (uint32_t)(val >> 32);
ret |= mlx5_devx_regex_register_write
- (priv->ctx, id,
+ (ctx, id,
MLX5_RXP_RTRU_CSR_DATA_0 +
MLX5_RXP_CSR_WIDTH, temp);
temp = address;
ret |= mlx5_devx_regex_register_write
- (priv->ctx, id, MLX5_RXP_RTRU_CSR_ADDR,
- temp);
+ (ctx, id, MLX5_RXP_RTRU_CSR_ADDR, temp);
if (ret) {
DRV_LOG(ERR,
"Failed to copy instructions to RXP.");
@@ -506,13 +506,14 @@ mlnx_set_database(struct mlx5_regex_priv *priv, uint8_t id, uint8_t db_to_use)
int ret;
uint32_t umem_id;
- ret = mlx5_devx_regex_database_stop(priv->ctx, id);
+ ret = mlx5_devx_regex_database_stop(priv->dev_ctx->ctx, id);
if (ret < 0) {
DRV_LOG(ERR, "stop engine failed!");
return ret;
}
umem_id = mlx5_os_get_umem_id(priv->db[db_to_use].umem.umem);
- ret = mlx5_devx_regex_database_program(priv->ctx, id, umem_id, 0);
+ ret = mlx5_devx_regex_database_program(priv->dev_ctx->ctx,
+ id, umem_id, 0);
if (ret < 0) {
DRV_LOG(ERR, "program db failed!");
return ret;
@@ -523,7 +524,7 @@ mlnx_set_database(struct mlx5_regex_priv *priv, uint8_t id, uint8_t db_to_use)
static int
mlnx_resume_database(struct mlx5_regex_priv *priv, uint8_t id)
{
- mlx5_devx_regex_database_resume(priv->ctx, id);
+ mlx5_devx_regex_database_resume(priv->dev_ctx->ctx, id);
return 0;
}
@@ -588,13 +589,13 @@ program_rxp_rules(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
{
int ret;
uint32_t val;
+ struct ibv_context *ctx = priv->dev_ctx->ctx;
ret = rxp_init_eng(priv, id);
if (ret < 0)
return ret;
/* Confirm the RXP is initialised. */
- if (mlx5_devx_regex_register_read(priv->ctx, id,
- MLX5_RXP_CSR_STATUS, &val)) {
+ if (mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_STATUS, &val)) {
DRV_LOG(ERR, "Failed to read from RXP!");
return -ENODEV;
}
@@ -602,14 +603,14 @@ program_rxp_rules(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
DRV_LOG(ERR, "RXP not initialised...");
return -EBUSY;
}
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(ctx, id,
MLX5_RXP_RTRU_CSR_CTRL, &val);
if (ret) {
DRV_LOG(ERR, "CSR read failed!");
return -1;
}
val |= MLX5_RXP_RTRU_CSR_CTRL_GO;
- ret = mlx5_devx_regex_register_write(priv->ctx, id,
+ ret = mlx5_devx_regex_register_write(ctx, id,
MLX5_RXP_RTRU_CSR_CTRL, val);
if (ret) {
DRV_LOG(ERR, "Can't program rof file!");
@@ -622,7 +623,7 @@ program_rxp_rules(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
}
if (priv->is_bf2) {
ret = rxp_poll_csr_for_value
- (priv->ctx, &val, MLX5_RXP_RTRU_CSR_STATUS,
+ (ctx, &val, MLX5_RXP_RTRU_CSR_STATUS,
MLX5_RXP_RTRU_CSR_STATUS_UPDATE_DONE,
MLX5_RXP_RTRU_CSR_STATUS_UPDATE_DONE,
MLX5_RXP_POLL_CSR_FOR_VALUE_TIMEOUT, id);
@@ -632,29 +633,27 @@ program_rxp_rules(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
}
DRV_LOG(DEBUG, "Rules update took %d cycles", ret);
}
- if (mlx5_devx_regex_register_read(priv->ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
+ if (mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
&val)) {
DRV_LOG(ERR, "CSR read failed!");
return -1;
}
val &= ~(MLX5_RXP_RTRU_CSR_CTRL_GO);
- if (mlx5_devx_regex_register_write(priv->ctx, id,
+ if (mlx5_devx_regex_register_write(ctx, id,
MLX5_RXP_RTRU_CSR_CTRL, val)) {
DRV_LOG(ERR, "CSR write failed!");
return -1;
}
- ret = mlx5_devx_regex_register_read(priv->ctx, id, MLX5_RXP_CSR_CTRL,
- &val);
+ ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &val);
if (ret)
return ret;
val &= ~MLX5_RXP_CSR_CTRL_INIT;
- ret = mlx5_devx_regex_register_write(priv->ctx, id, MLX5_RXP_CSR_CTRL,
- val);
+ ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, val);
if (ret)
return ret;
rxp_init_rtru(priv, id, MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_L1_L2);
if (priv->is_bf2) {
- ret = rxp_poll_csr_for_value(priv->ctx, &val,
+ ret = rxp_poll_csr_for_value(ctx, &val,
MLX5_RXP_CSR_STATUS,
MLX5_RXP_CSR_STATUS_INIT_DONE,
MLX5_RXP_CSR_STATUS_INIT_DONE,
@@ -670,9 +669,7 @@ program_rxp_rules(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
DRV_LOG(ERR, "Failed to resume engine!");
return ret;
}
-
return ret;
-
}
static int
@@ -680,7 +677,7 @@ rxp_init_eng(struct mlx5_regex_priv *priv, uint8_t id)
{
uint32_t ctrl;
uint32_t reg;
- struct ibv_context *ctx = priv->ctx;
+ struct ibv_context *ctx = priv->dev_ctx->ctx;
int ret;
ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
@@ -758,9 +755,10 @@ rxp_db_setup(struct mlx5_regex_priv *priv)
goto tidyup_error;
}
/* Register the memory. */
- priv->db[i].umem.umem = mlx5_glue->devx_umem_reg(priv->ctx,
- priv->db[i].ptr,
- MLX5_MAX_DB_SIZE, 7);
+ priv->db[i].umem.umem = mlx5_glue->devx_umem_reg
+ (priv->dev_ctx->ctx,
+ priv->db[i].ptr,
+ MLX5_MAX_DB_SIZE, 7);
if (!priv->db[i].umem.umem) {
DRV_LOG(ERR, "Failed to register memory!");
ret = ENODEV;
@@ -804,14 +802,14 @@ mlx5_regex_rules_db_import(struct rte_regexdev *dev,
}
if (rule_db_len == 0)
return -EINVAL;
- if (mlx5_devx_regex_register_read(priv->ctx, 0,
+ if (mlx5_devx_regex_register_read(priv->dev_ctx->ctx, 0,
MLX5_RXP_CSR_BASE_ADDRESS, &ver)) {
DRV_LOG(ERR, "Failed to read Main CSRs Engine 0!");
return -1;
}
/* Need to ensure RXP not busy before stop! */
for (id = 0; id < priv->nb_engines; id++) {
- ret = rxp_stop_engine(priv->ctx, id);
+ ret = rxp_stop_engine(priv->dev_ctx->ctx, id);
if (ret) {
DRV_LOG(ERR, "Can't stop engine.");
ret = -ENODEV;
@@ -823,7 +821,7 @@ mlx5_regex_rules_db_import(struct rte_regexdev *dev,
ret = -ENODEV;
goto tidyup_error;
}
- ret = rxp_start_engine(priv->ctx, id);
+ ret = rxp_start_engine(priv->dev_ctx->ctx, id);
if (ret) {
DRV_LOG(ERR, "Can't start engine.");
ret = -ENODEV;
--
2.25.1
next prev parent reply other threads:[~2021-08-17 13:46 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-08-17 13:44 [dpdk-dev] [RFC 00/21] mlx5: sharing global MR cache between drivers Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 01/21] net/mlx5: fix shared device context creation error flow Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 02/21] net/mlx5: fix PCI probing " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 03/21] common/mlx5: add context device structure Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 04/21] compress/mlx5: use " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 05/21] crypto/mlx5: " Michael Baum
2021-08-17 13:44 ` Michael Baum [this message]
2021-08-17 13:44 ` [dpdk-dev] [RFC 07/21] net/mlx5: improve probe function on Windows Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 08/21] net/mlx5: improve probe function on Linux Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 09/21] net/mlx5: improve spawn function Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 10/21] net/mlx5: use context device structure Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 11/21] net/mlx5: move NUMA node field to context device Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 12/21] common/mlx5: add ROCE disable in context device creation Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 13/21] vdpa/mlx5: use context device structure Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 14/21] mlx5: update device sent to probing Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 15/21] mlx5: share context device structure between drivers Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 16/21] common/mlx5: add HCA attributes to context device structure Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 17/21] regex/mlx5: use HCA attributes from context device Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 18/21] vdpa/mlx5: " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 19/21] compress/mlx5: " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 20/21] crypto/mlx5: " Michael Baum
2021-08-17 13:44 ` [dpdk-dev] [RFC 21/21] net/mlx5: " Michael Baum
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210817134441.1966618-7-michaelba@nvidia.com \
--to=michaelba@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=rasland@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).