From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v1 06/15] net/mlx5: reposition the event queue number field
Date: Thu, 1 Oct 2020 14:09:17 +0000
Message-ID: <1601561366-1821-7-git-send-email-michaelba@nvidia.com>
In-Reply-To: <1601561366-1821-1-git-send-email-michaelba@nvidia.com>
Move the eqn field from the Tx packet pacing structure into the shared
device context (sh), since the event queue number is relevant for Rx as
well as Tx. Query the EQN once when the shared device context is
created, instead of separately in the Rx DevX and Tx packet pacing code.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
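Note (not part of the commit message): a minimal sketch of the resulting
flow, reusing the names from the hunks below; the helper name
mlx5_query_shared_eqn is only illustrative and does not exist in the
driver. The EQN is queried once per shared device context, and every CQ
creation then reads sh->eqn instead of querying it again:

	static int
	mlx5_query_shared_eqn(struct mlx5_dev_ctx_shared *sh)
	{
		uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);

		/* Query the EQN for this core once, at shared context creation. */
		if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->eqn)) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to query event queue number %d.",
				rte_errno);
			return -rte_errno;
		}
		return 0;
	}

	/* Later, any CQ creation (Rx DevX, Tx pacing, Txq) reuses the value: */
	cq_attr.eqn = sh->eqn;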
drivers/net/mlx5/mlx5.c | 10 ++++++++++
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_devx.c | 9 +--------
drivers/net/mlx5/mlx5_txpp.c | 28 ++++++++--------------------
drivers/net/mlx5/mlx5_txq.c | 2 +-
5 files changed, 21 insertions(+), 30 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 01ead6e..e5ca392 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -925,6 +925,16 @@ struct mlx5_dev_ctx_shared *
goto error;
}
if (sh->devx) {
+ uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
+
+ /* Query the EQN for this core. */
+ err = mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->eqn);
+ if (err) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to query event queue number %d.",
+ rte_errno);
+ goto error;
+ }
err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
if (err) {
DRV_LOG(ERR, "Fail to extract pdn from PD");
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bd91e16..050d3a9 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -561,7 +561,6 @@ struct mlx5_dev_txpp {
uint32_t tick; /* Completion tick duration in nanoseconds. */
uint32_t test; /* Packet pacing test mode. */
int32_t skew; /* Scheduling skew. */
- uint32_t eqn; /* Event Queue number. */
struct rte_intr_handle intr_handle; /* Periodic interrupt. */
void *echan; /* Event Channel. */
struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
@@ -603,6 +602,7 @@ struct mlx5_dev_ctx_shared {
LIST_ENTRY(mlx5_dev_ctx_shared) next;
uint32_t refcnt;
uint32_t devx:1; /* Opened with DV. */
+ uint32_t eqn; /* Event Queue number. */
uint32_t max_port; /* Maximal IB device port index. */
void *ctx; /* Verbs/DV/DevX context. */
void *pd; /* Protection Domain. */
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index cb4a522..cddfe43 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -350,11 +350,9 @@
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
size_t page_size = rte_mem_page_size();
- uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
struct mlx5_devx_dbr_page *dbr_page;
int64_t dbr_offset;
- uint32_t eqn = 0;
void *buf = NULL;
uint16_t event_nums[1] = {0};
uint32_t log_cqe_n;
@@ -392,12 +390,6 @@
cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
log_cqe_n = log2above(cqe_n);
cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
- /* Query the EQN for this core. */
- if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
- DRV_LOG(ERR, "Failed to query EQN for CQ.");
- goto error;
- }
- cq_attr.eqn = eqn;
buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
rxq_ctrl->socket);
if (!buf) {
@@ -425,6 +417,7 @@
rxq_data->cq_uar =
mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
/* Create CQ using DevX API. */
+ cq_attr.eqn = priv->sh->eqn;
cq_attr.uar_page_id =
mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 011e479..37355fa 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -31,36 +31,24 @@
/* Destroy Event Queue Notification Channel. */
static void
-mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
{
if (sh->txpp.echan) {
mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
sh->txpp.echan = NULL;
}
- sh->txpp.eqn = 0;
}
/* Create Event Queue Notification Channel. */
static int
-mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
{
- uint32_t lcore;
-
MLX5_ASSERT(!sh->txpp.echan);
- lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
- if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->txpp.eqn)) {
- rte_errno = errno;
- DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
- sh->txpp.eqn = 0;
- return -rte_errno;
- }
sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
if (!sh->txpp.echan) {
- sh->txpp.eqn = 0;
rte_errno = errno;
- DRV_LOG(ERR, "Failed to create event channel %d.",
- rte_errno);
+ DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
return -rte_errno;
}
return 0;
@@ -285,7 +273,7 @@
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
- cq_attr.eqn = sh->txpp.eqn;
+ cq_attr.eqn = sh->eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
@@ -525,7 +513,7 @@
cq_attr.use_first_only = 1;
cq_attr.overrun_ignore = 1;
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
- cq_attr.eqn = sh->txpp.eqn;
+ cq_attr.eqn = sh->eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
@@ -951,7 +939,7 @@
sh->txpp.test = !!(tx_pp < 0);
sh->txpp.skew = priv->config.tx_skew;
sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
- ret = mlx5_txpp_create_eqn(sh);
+ ret = mlx5_txpp_create_event_channel(sh);
if (ret)
goto exit;
ret = mlx5_txpp_alloc_pp_index(sh);
@@ -972,7 +960,7 @@
mlx5_txpp_destroy_rearm_queue(sh);
mlx5_txpp_destroy_clock_queue(sh);
mlx5_txpp_free_pp_index(sh);
- mlx5_txpp_destroy_eqn(sh);
+ mlx5_txpp_destroy_event_channel(sh);
sh->txpp.tick = 0;
sh->txpp.test = 0;
sh->txpp.skew = 0;
@@ -994,7 +982,7 @@
mlx5_txpp_destroy_rearm_queue(sh);
mlx5_txpp_destroy_clock_queue(sh);
mlx5_txpp_free_pp_index(sh);
- mlx5_txpp_destroy_eqn(sh);
+ mlx5_txpp_destroy_event_channel(sh);
sh->txpp.tick = 0;
sh->txpp.test = 0;
sh->txpp.skew = 0;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index f2ecfc4..c678971 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -985,7 +985,7 @@
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
- cq_attr.eqn = priv->sh->txpp.eqn;
+ cq_attr.eqn = priv->sh->eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
--
1.8.3.1