From: Ophir Munk <ophirmu@nvidia.com>
To: dev@dpdk.org
Cc: Ophir Munk <ophirmu@nvidia.com>,
Gregory Etelson <getelson@mellanox.com>,
Ophir Munk <ophirmu@mellanox.com>
Subject: [dpdk-dev] [PATCH v1 10/13] net/mlx5: remove more DV dependencies
Date: Thu, 20 Aug 2020 14:50:25 +0000
Message-ID: <20200820145028.4090-10-ophirmu@nvidia.com>
In-Reply-To: <20200820145028.4090-1-ophirmu@nvidia.com>
From: Ophir Munk <ophirmu@mellanox.com>
Several DV-based struct pointers of type 'struct mlx5dv_XXX' are replaced
with 'void *' to enable compilation under non-Linux operating systems.
New getter functions are added to retrieve the specific fields that
were previously accessed directly.
Replaced structs:
'struct mlx5dv_pp *'
'struct mlx5dv_devx_event_channel *'
'struct mlx5dv_devx_umem *'
'struct mlx5dv_devx_uar *'
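For example, a direct field access (taken from mlx5_txpp.c below):

    cq_attr.uar_page_id = sh->tx_uar->page_id;

becomes a call to the matching getter:

    cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);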
Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
drivers/common/mlx5/linux/mlx5_common_os.h | 91 ++++++++++++++++++++++++++++++
drivers/net/mlx5/mlx5.c | 14 +++--
drivers/net/mlx5/mlx5.h | 12 ++--
drivers/net/mlx5/mlx5_rxtx.h | 10 ++--
drivers/net/mlx5/mlx5_txpp.c | 38 +++++++------
drivers/net/mlx5/mlx5_txq.c | 17 +++---
6 files changed, 144 insertions(+), 38 deletions(-)
diff --git a/drivers/common/mlx5/linux/mlx5_common_os.h b/drivers/common/mlx5/linux/mlx5_common_os.h
index 55c0902..8301d90 100644
--- a/drivers/common/mlx5/linux/mlx5_common_os.h
+++ b/drivers/common/mlx5/linux/mlx5_common_os.h
@@ -90,4 +90,95 @@ mlx5_os_get_umem_id(void *umem)
return 0;
return ((struct mlx5dv_devx_umem *)umem)->umem_id;
}
+
+/**
+ * Get fd. Given a pointer to a DevX channel object of type
+ * 'struct mlx5dv_devx_event_channel *', return its fd.
+ *
+ * @param[in] channel
+ * Pointer to channel object.
+ *
+ * @return
+ * The fd if channel is valid, 0 otherwise.
+ */
+static inline int
+mlx5_os_get_devx_channel_fd(void *channel)
+{
+ if (!channel)
+ return 0;
+ return ((struct mlx5dv_devx_event_channel *)channel)->fd;
+}
+
+/**
+ * Get mmap offset. Given a pointer to a DevX UAR object of type
+ * 'struct mlx5dv_devx_uar *', return its mmap offset.
+ *
+ * @param[in] uar
+ * Pointer to UAR object.
+ *
+ * @return
+ * The mmap offset if uar is valid, 0 otherwise.
+ */
+static inline off_t
+mlx5_os_get_devx_uar_mmap_offset(void *uar)
+{
+ if (!uar)
+ return 0;
+ return ((struct mlx5dv_devx_uar *)uar)->mmap_off;
+}
+
+/**
+ * Get base addr pointer. Given a pointer to a UAR object of type
+ * 'struct mlx5dv_devx_uar *', return its base address.
+ *
+ * @param[in] uar
+ * Pointer to a UAR object.
+ *
+ * @return
+ * The base address if UAR is valid, 0 otherwise.
+ */
+static inline void *
+mlx5_os_get_devx_uar_base_addr(void *uar)
+{
+ if (!uar)
+ return 0;
+ return ((struct mlx5dv_devx_uar *)uar)->base_addr;
+}
+
+/**
+ * Get reg addr pointer. Given a pointer to a UAR object of type
+ * 'struct mlx5dv_devx_uar *', return its reg address.
+ *
+ * @param[in] uar
+ * Pointer to a UAR object.
+ *
+ * @return
+ * The reg address if UAR is valid, 0 otherwise.
+ */
+static inline void *
+mlx5_os_get_devx_uar_reg_addr(void *uar)
+{
+ if (!uar)
+ return 0;
+ return ((struct mlx5dv_devx_uar *)uar)->reg_addr;
+}
+
+/**
+ * Get page id. Given a pointer to a UAR object of type
+ * 'struct mlx5dv_devx_uar *', return its page id.
+ *
+ * @param[in] uar
+ * Pointer to a UAR object.
+ *
+ * @return
+ * The page id if UAR is valid, 0 otherwise.
+ */
+static inline uint32_t
+mlx5_os_get_devx_uar_page_id(void *uar)
+{
+ if (!uar)
+ return 0;
+ return ((struct mlx5dv_devx_uar *)uar)->page_id;
+}
+
#endif /* RTE_PMD_MLX5_COMMON_OS_H_ */
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fdda6ff..4a807fb 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -723,6 +723,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
{
uint32_t uar_mapping, retry;
int err = 0;
+ void *base_addr;
for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
@@ -781,7 +782,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
err = ENOMEM;
goto exit;
}
- if (sh->tx_uar->base_addr)
+ base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
+ if (base_addr)
break;
/*
* The UARs are allocated by rdma_core within the
@@ -820,7 +822,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
err = ENOMEM;
goto exit;
}
- if (sh->devx_rx_uar->base_addr)
+ base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
+ if (base_addr)
break;
/*
* The UARs are allocated by rdma_core within the
@@ -943,8 +946,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
err = mlx5_alloc_rxtx_uars(sh, config);
if (err)
goto error;
- MLX5_ASSERT(sh->tx_uar && sh->tx_uar->base_addr);
- MLX5_ASSERT(sh->devx_rx_uar && sh->devx_rx_uar->base_addr);
+ MLX5_ASSERT(sh->tx_uar);
+ MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar));
+
+ MLX5_ASSERT(sh->devx_rx_uar);
+ MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
}
sh->flow_id_pool = mlx5_flow_id_pool_alloc
((1 << HAIRPIN_FLOW_ID_BITS) - 1);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a45bd0b..34d7a15 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -527,7 +527,7 @@ struct mlx5_flow_id_pool {
struct mlx5_txpp_wq {
/* Completion Queue related data.*/
struct mlx5_devx_obj *cq;
- struct mlx5dv_devx_umem *cq_umem;
+ void *cq_umem;
union {
volatile void *cq_buf;
volatile struct mlx5_cqe *cqes;
@@ -537,7 +537,7 @@ struct mlx5_txpp_wq {
uint32_t arm_sn:2;
/* Send Queue related data.*/
struct mlx5_devx_obj *sq;
- struct mlx5dv_devx_umem *sq_umem;
+ void *sq_umem;
union {
volatile void *sq_buf;
volatile struct mlx5_wqe *wqes;
@@ -563,10 +563,10 @@ struct mlx5_dev_txpp {
int32_t skew; /* Scheduling skew. */
uint32_t eqn; /* Event Queue number. */
struct rte_intr_handle intr_handle; /* Periodic interrupt. */
- struct mlx5dv_devx_event_channel *echan; /* Event Channel. */
+ void *echan; /* Event Channel. */
struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
struct mlx5_txpp_wq rearm_queue; /* Rearm Queue. */
- struct mlx5dv_pp *pp; /* Packet pacing context. */
+ void *pp; /* Packet pacing context. */
uint16_t pp_id; /* Packet pacing context index. */
uint16_t ts_n; /* Number of captured timestamps. */
uint16_t ts_p; /* Pointer to statistics timestamp. */
@@ -653,10 +653,10 @@ struct mlx5_dev_ctx_shared {
struct mlx5_devx_obj *tis; /* TIS object. */
struct mlx5_devx_obj *td; /* Transport domain. */
struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
- struct mlx5dv_devx_uar *tx_uar; /* Tx/packer pacing shared UAR. */
+ void *tx_uar; /* Tx/packet pacing shared UAR. */
struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
/* Flex parser profiles information. */
- struct mlx5dv_devx_uar *devx_rx_uar; /* DevX UAR for Rx. */
+ void *devx_rx_uar; /* DevX UAR for Rx. */
struct mlx5_dev_shared_port port[]; /* per device port data array. */
};
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index c02a007..0fc7754 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -185,7 +185,7 @@ struct mlx5_rxq_obj {
struct {
struct mlx5_devx_obj *rq; /* DevX Rx Queue object. */
struct mlx5_devx_obj *devx_cq; /* DevX CQ object. */
- struct mlx5dv_devx_event_channel *devx_channel;
+ void *devx_channel;
};
};
};
@@ -212,8 +212,8 @@ struct mlx5_rxq_ctrl {
uint32_t cq_dbr_umem_id;
uint64_t cq_dbr_offset;
/* Storing CQ door-bell information, needed when freeing door-bell. */
- struct mlx5dv_devx_umem *wq_umem; /* WQ buffer registration info. */
- struct mlx5dv_devx_umem *cq_umem; /* CQ buffer registration info. */
+ void *wq_umem; /* WQ buffer registration info. */
+ void *cq_umem; /* CQ buffer registration info. */
struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
};
@@ -361,12 +361,12 @@ struct mlx5_txq_obj {
struct {
struct rte_eth_dev *dev;
struct mlx5_devx_obj *cq_devx;
- struct mlx5dv_devx_umem *cq_umem;
+ void *cq_umem;
void *cq_buf;
int64_t cq_dbrec_offset;
struct mlx5_devx_dbr_page *cq_dbrec_page;
struct mlx5_devx_obj *sq_devx;
- struct mlx5dv_devx_umem *sq_umem;
+ void *sq_umem;
void *sq_buf;
int64_t sq_dbrec_offset;
struct mlx5_devx_dbr_page *sq_dbrec_page;
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 14d4a66..5aa73dd 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -113,13 +113,13 @@ mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
rte_errno = errno;
return -errno;
}
- if (!sh->txpp.pp->index) {
+ if (!(((struct mlx5dv_pp *)(sh->txpp.pp))->index)) {
DRV_LOG(ERR, "Zero packet pacing index allocated.");
mlx5_txpp_free_pp_index(sh);
rte_errno = ENOTSUP;
return -ENOTSUP;
}
- sh->txpp.pp_id = sh->txpp.pp->index;
+ sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
return 0;
#else
RTE_SET_USED(sh);
@@ -175,6 +175,7 @@ mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
uint32_t w32[2];
uint64_t w64;
} cs;
+ void *reg_addr;
wq->sq_ci = ci + 1;
cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
@@ -186,7 +187,8 @@ mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
/* Make sure the doorbell record is updated. */
rte_wmb();
/* Write to doorbell register to start processing. */
- __mlx5_uar_write64_relaxed(cs.w64, sh->tx_uar->reg_addr, NULL);
+ reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
+ __mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
rte_wmb();
}
@@ -282,7 +284,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
/* Create completion queue object for Rearm Queue. */
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
- cq_attr.uar_page_id = sh->tx_uar->page_id;
+ cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
cq_attr.eqn = sh->txpp.eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
@@ -335,7 +337,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
sq_attr.tis_num = sh->tis->id;
sq_attr.cqn = wq->cq->id;
sq_attr.cd_master = 1;
- sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+ sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
sq_attr.wq_attr.pd = sh->pdn;
sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
@@ -522,14 +524,14 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.use_first_only = 1;
cq_attr.overrun_ignore = 1;
- cq_attr.uar_page_id = sh->tx_uar->page_id;
+ cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
cq_attr.eqn = sh->txpp.eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
- cq_attr.q_umem_id = wq->cq_umem->umem_id;
+ cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
cq_attr.db_umem_valid = 1;
cq_attr.db_umem_offset = umem_dbrec;
- cq_attr.db_umem_id = wq->cq_umem->umem_id;
+ cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
cq_attr.log_page_size = rte_log2_u32(page_size);
wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
@@ -587,16 +589,16 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
sq_attr.cqn = wq->cq->id;
sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
sq_attr.wq_attr.cd_slave = 1;
- sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+ sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
sq_attr.wq_attr.pd = sh->pdn;
sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
sq_attr.wq_attr.dbr_umem_valid = 1;
sq_attr.wq_attr.dbr_addr = umem_dbrec;
- sq_attr.wq_attr.dbr_umem_id = wq->sq_umem->umem_id;
+ sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
sq_attr.wq_attr.wq_umem_valid = 1;
- sq_attr.wq_attr.wq_umem_id = wq->sq_umem->umem_id;
+ sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
/* umem_offset must be zero for static_sq_wq queue. */
sq_attr.wq_attr.wq_umem_offset = 0;
wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
@@ -630,11 +632,14 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
static inline void
mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
{
+ void *base_addr;
+
struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
uint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);
- uint32_t *addr = RTE_PTR_ADD(sh->tx_uar->base_addr, MLX5_CQ_DOORBELL);
+ base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
+ uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
rte_compiler_barrier();
aq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
@@ -881,8 +886,8 @@ static int
mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
{
uint16_t event_nums[1] = {0};
- int flags;
int ret;
+ int fd;
rte_atomic32_set(&sh->txpp.err_miss_int, 0);
rte_atomic32_set(&sh->txpp.err_rearm_queue, 0);
@@ -890,15 +895,16 @@ mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
rte_atomic32_set(&sh->txpp.err_ts_past, 0);
rte_atomic32_set(&sh->txpp.err_ts_future, 0);
/* Attach interrupt handler to process Rearm Queue completions. */
- flags = fcntl(sh->txpp.echan->fd, F_GETFL);
- ret = fcntl(sh->txpp.echan->fd, F_SETFL, flags | O_NONBLOCK);
+ fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+ ret = mlx5_os_set_nonblock_channel_fd(fd);
if (ret) {
DRV_LOG(ERR, "Failed to change event channel FD.");
rte_errno = errno;
return -rte_errno;
}
memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
- sh->txpp.intr_handle.fd = sh->txpp.echan->fd;
+ fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+ sh->txpp.intr_handle.fd = fd;
sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
if (rte_intr_callback_register(&sh->txpp.intr_handle,
mlx5_txpp_interrupt_handler, sh)) {
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 21fe16b..fed9d8a 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -907,6 +907,7 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
size_t page_size;
struct mlx5_cqe *cqe;
uint32_t i, nqe;
+ void *reg_addr;
size_t alignment = (size_t)-1;
int ret = 0;
@@ -991,11 +992,11 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
/* Create completion queue object with DevX. */
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
- cq_attr.uar_page_id = sh->tx_uar->page_id;
+ cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
cq_attr.eqn = sh->txpp.eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
- cq_attr.q_umem_id = txq_obj->cq_umem->umem_id;
+ cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
cq_attr.db_umem_valid = 1;
cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
@@ -1069,7 +1070,7 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
sq_attr.allow_swp = !!priv->config.swp;
sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
- sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+ sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
sq_attr.wq_attr.pd = sh->pdn;
sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
@@ -1079,7 +1080,7 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
sq_attr.wq_attr.dbr_umem_id =
mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
sq_attr.wq_attr.wq_umem_valid = 1;
- sq_attr.wq_attr.wq_umem_id = txq_obj->sq_umem->umem_id;
+ sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
txq_obj->sq_devx = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
if (!txq_obj->sq_devx) {
@@ -1120,9 +1121,11 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
priv->sh->tdn = priv->sh->td->id;
#endif
MLX5_ASSERT(sh->tx_uar);
- MLX5_ASSERT(sh->tx_uar->reg_addr);
- txq_ctrl->bf_reg = sh->tx_uar->reg_addr;
- txq_ctrl->uar_mmap_offset = sh->tx_uar->mmap_off;
+ reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
+ MLX5_ASSERT(reg_addr);
+ txq_ctrl->bf_reg = reg_addr;
+ txq_ctrl->uar_mmap_offset =
+ mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
rte_atomic32_set(&txq_obj->refcnt, 1);
txq_uar_init(txq_ctrl);
LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
--
2.8.4