DPDK patches and discussions
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com, yskoh@mellanox.com, ferruh.yigit@intel.com
Subject: [dpdk-dev] [PATCH v3 04/30] net/mlx5: prefix Tx structures and functions
Date: Mon,  9 Oct 2017 16:44:40 +0200	[thread overview]
Message-ID: <5f2c7e5857ac98f1bbef4b776c431f8991465194.1507560012.git.nelio.laranjeiro@6wind.com> (raw)
In-Reply-To: <cover.1507560012.git.nelio.laranjeiro@6wind.com>

Prefix struct txq, struct txq_ctrl and their associated functions with mlx5.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
---
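Note for reviewers: below is a minimal, self-contained sketch of the renaming
pattern this patch applies, for reference only. The placeholder members, the
local container_of() definition and the helper name mlx5_txq_to_ctrl() are
illustrative assumptions, not code taken from the driver; the real structures
carry the full set of fields shown in the hunks that follow.

#include <stddef.h>

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif

/* Data path descriptor, formerly "struct txq" (fields reduced here). */
struct mlx5_txq_data {
	unsigned int elts_n; /* Placeholder for the real data-path fields. */
};

/* Control descriptor, formerly "struct txq_ctrl"; embeds the data path. */
struct mlx5_txq_ctrl {
	unsigned int socket; /* CPU socket ID for allocations. */
	struct mlx5_txq_data txq; /* Data path structure. */
};

/* Hypothetical helper showing the same idiom used by mlx5_dev_close() and
 * mlx5_tx_queue_release() below: recover the control structure from a
 * data-path pointer. */
static inline struct mlx5_txq_ctrl *
mlx5_txq_to_ctrl(struct mlx5_txq_data *txq)
{
	return container_of(txq, struct mlx5_txq_ctrl, txq);
}
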
 drivers/net/mlx5/mlx5.c              |  8 +++----
 drivers/net/mlx5/mlx5.h              |  2 +-
 drivers/net/mlx5/mlx5_mr.c           | 12 ++++++----
 drivers/net/mlx5/mlx5_rxtx.c         | 25 ++++++++++----------
 drivers/net/mlx5/mlx5_rxtx.h         | 27 +++++++++++-----------
 drivers/net/mlx5/mlx5_rxtx_vec_sse.c | 17 +++++++-------
 drivers/net/mlx5/mlx5_stats.c        |  2 +-
 drivers/net/mlx5/mlx5_txq.c          | 45 ++++++++++++++++++------------------
 8 files changed, 72 insertions(+), 66 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 40499b1..7aea5a4 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -225,14 +225,14 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 		/* XXX race condition if mlx5_tx_burst() is still running. */
 		usleep(1000);
 		for (i = 0; (i != priv->txqs_n); ++i) {
-			struct txq *txq = (*priv->txqs)[i];
-			struct txq_ctrl *txq_ctrl;
+			struct mlx5_txq_data *txq = (*priv->txqs)[i];
+			struct mlx5_txq_ctrl *txq_ctrl;
 
 			if (txq == NULL)
 				continue;
-			txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+			txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
 			(*priv->txqs)[i] = NULL;
-			txq_cleanup(txq_ctrl);
+			mlx5_txq_cleanup(txq_ctrl);
 			rte_free(txq_ctrl);
 		}
 		priv->txqs_n = 0;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index ddaf227..1b511e1 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -131,7 +131,7 @@ struct priv {
 	unsigned int rxqs_n; /* RX queues array size. */
 	unsigned int txqs_n; /* TX queues array size. */
 	struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
-	struct txq *(*txqs)[]; /* TX queues. */
+	struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
 	/* Indirection tables referencing all RX WQs. */
 	struct ibv_rwq_ind_table *(*ind_tables)[];
 	unsigned int ind_tables_n; /* Number of indirection tables. */
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 9a9f73a..6199746 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -175,9 +175,11 @@ mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
  *   mr->lkey on success, (uint32_t)-1 on failure.
  */
 uint32_t
-txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
+mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
+		   unsigned int idx)
 {
-	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq, struct mlx5_txq_ctrl, txq);
 	struct ibv_mr *mr;
 
 	/* Add a new entry, register MR first. */
@@ -253,9 +255,9 @@ txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
  *   Pointer to TX queue structure.
  */
 void
-txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
+mlx5_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
 {
-	struct txq_ctrl *txq_ctrl = arg;
+	struct mlx5_txq_ctrl *txq_ctrl = arg;
 	struct txq_mp2mr_mbuf_check_data data = {
 		.ret = 0,
 	};
@@ -283,5 +285,5 @@ txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
 		    end <= (uintptr_t)mr->addr + mr->length)
 			return;
 	}
-	txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
+	mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index ad1071b..9389383 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -200,7 +200,7 @@ mlx5_set_ptype_table(void)
  *   Size of tailroom.
  */
 static inline size_t
-tx_mlx5_wq_tailroom(struct txq *txq, void *addr)
+tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
 {
 	size_t tailroom;
 	tailroom = (uintptr_t)(txq->wqes) +
@@ -258,7 +258,7 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
 int
 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 {
-	struct txq *txq = tx_queue;
+	struct mlx5_txq_data *txq = tx_queue;
 	uint16_t used;
 
 	mlx5_tx_complete(txq);
@@ -334,7 +334,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 uint16_t
 mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-	struct txq *txq = (struct txq *)dpdk_txq;
+	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const uint16_t elts_n = 1 << txq->elts_n;
 	const uint16_t elts_m = elts_n - 1;
@@ -747,7 +747,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Packet length.
  */
 static inline void
-mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
 {
 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
 	volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
@@ -787,7 +787,7 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
  *   Pointer to MPW session structure.
  */
 static inline void
-mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
 {
 	unsigned int num = mpw->pkts_n;
 
@@ -821,7 +821,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
 uint16_t
 mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-	struct txq *txq = (struct txq *)dpdk_txq;
+	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const uint16_t elts_n = 1 << txq->elts_n;
 	const uint16_t elts_m = elts_n - 1;
@@ -964,7 +964,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Packet length.
  */
 static inline void
-mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
+		    uint32_t length)
 {
 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
 	struct mlx5_wqe_inl_small *inl;
@@ -999,7 +1000,7 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
  *   Pointer to MPW session structure.
  */
 static inline void
-mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
 {
 	unsigned int size;
 	struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
@@ -1034,7 +1035,7 @@ uint16_t
 mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 			 uint16_t pkts_n)
 {
-	struct txq *txq = (struct txq *)dpdk_txq;
+	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const uint16_t elts_n = 1 << txq->elts_n;
 	const uint16_t elts_m = elts_n - 1;
@@ -1260,7 +1261,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
  *   Packet length.
  */
 static inline void
-mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
+mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
 {
 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
 
@@ -1302,7 +1303,7 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
  *   Number of consumed WQEs.
  */
 static inline uint16_t
-mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
 {
 	uint16_t ret;
 
@@ -1333,7 +1334,7 @@ mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
 uint16_t
 mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-	struct txq *txq = (struct txq *)dpdk_txq;
+	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const uint16_t elts_n = 1 << txq->elts_n;
 	const uint16_t elts_m = elts_n - 1;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index a86b6fb..6ffcfb7 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -233,7 +233,7 @@ struct hash_rxq {
 
 /* TX queue descriptor. */
 __extension__
-struct txq {
+struct mlx5_txq_data {
 	uint16_t elts_head; /* Current counter in (*elts)[]. */
 	uint16_t elts_tail; /* Counter of first element awaiting completion. */
 	uint16_t elts_comp; /* Counter since last completion request. */
@@ -271,12 +271,12 @@ struct txq {
 } __rte_cache_aligned;
 
 /* TX queue control descriptor. */
-struct txq_ctrl {
+struct mlx5_txq_ctrl {
 	struct priv *priv; /* Back pointer to private data. */
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_qp *qp; /* Queue Pair. */
 	unsigned int socket; /* CPU socket ID for allocations. */
-	struct txq txq; /* Data path structure. */
+	struct mlx5_txq_data txq; /* Data path structure. */
 	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
 };
 
@@ -305,9 +305,9 @@ int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 
 /* mlx5_txq.c */
 
-void txq_cleanup(struct txq_ctrl *);
-int txq_ctrl_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t,
-		   unsigned int, const struct rte_eth_txconf *);
+void mlx5_txq_cleanup(struct mlx5_txq_ctrl *);
+int mlx5_txq_ctrl_setup(struct rte_eth_dev *, struct mlx5_txq_ctrl *, uint16_t,
+			unsigned int, const struct rte_eth_txconf *);
 int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 			const struct rte_eth_txconf *);
 void mlx5_tx_queue_release(void *);
@@ -340,8 +340,9 @@ uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
 /* mlx5_mr.c */
 
 struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
-void txq_mp2mr_iter(struct rte_mempool *, void *);
-uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int);
+void mlx5_txq_mp2mr_iter(struct rte_mempool *, void *);
+uint32_t mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *,
+			    unsigned int);
 
 #ifndef NDEBUG
 /**
@@ -439,7 +440,7 @@ check_cqe(volatile struct mlx5_cqe *cqe,
  *   WQE address.
  */
 static inline uintptr_t *
-tx_mlx5_wqe(struct txq *txq, uint16_t ci)
+tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
 {
 	ci &= ((1 << txq->wqe_n) - 1);
 	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
@@ -454,7 +455,7 @@ tx_mlx5_wqe(struct txq *txq, uint16_t ci)
  *   Pointer to TX queue structure.
  */
 static __rte_always_inline void
-mlx5_tx_complete(struct txq *txq)
+mlx5_tx_complete(struct mlx5_txq_data *txq)
 {
 	const uint16_t elts_n = 1 << txq->elts_n;
 	const uint16_t elts_m = elts_n - 1;
@@ -559,7 +560,7 @@ mlx5_tx_mb2mp(struct rte_mbuf *buf)
  *   mr->lkey on success, (uint32_t)-1 on failure.
  */
 static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
 {
 	uint16_t i = txq->mr_cache_idx;
 	uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
@@ -582,7 +583,7 @@ mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
 		}
 	}
 	txq->mr_cache_idx = 0;
-	return txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
+	return mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
 }
 
 /**
@@ -594,7 +595,7 @@ mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
  *   Pointer to the last WQE posted in the NIC.
  */
 static __rte_always_inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
 {
 	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
 	volatile uint64_t *src = ((volatile uint64_t *)wqe);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
index b0c87bf..7e5ce6d 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
@@ -76,7 +76,7 @@
  *   Number of packets to be filled.
  */
 static inline void
-txq_wr_dseg_v(struct txq *txq, __m128i *dseg,
+txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
 	      struct rte_mbuf **pkts, unsigned int n)
 {
 	unsigned int pos;
@@ -151,8 +151,8 @@ txq_check_multiseg(struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Number of packets having same ol_flags.
  */
 static inline unsigned int
-txq_calc_offload(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
-		 uint8_t *cs_flags)
+txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+		 uint16_t pkts_n, uint8_t *cs_flags)
 {
 	unsigned int pos;
 	const uint64_t ol_mask =
@@ -202,7 +202,8 @@ txq_calc_offload(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
  *   Number of packets successfully transmitted (<= pkts_n).
  */
 static uint16_t
-txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+	      uint16_t pkts_n)
 {
 	uint16_t elts_head = txq->elts_head;
 	const uint16_t elts_n = 1 << txq->elts_n;
@@ -332,7 +333,7 @@ txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Number of packets successfully transmitted (<= pkts_n).
  */
 static inline uint16_t
-txq_burst_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	    uint8_t cs_flags)
 {
 	struct rte_mbuf **elts;
@@ -448,7 +449,7 @@ uint16_t
 mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
 		      uint16_t pkts_n)
 {
-	struct txq *txq = (struct txq *)dpdk_txq;
+	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
 	uint16_t nb_tx = 0;
 
 	while (pkts_n > nb_tx) {
@@ -480,7 +481,7 @@ mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
 uint16_t
 mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-	struct txq *txq = (struct txq *)dpdk_txq;
+	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
 	uint16_t nb_tx = 0;
 
 	while (pkts_n > nb_tx) {
@@ -1304,7 +1305,7 @@ priv_check_raw_vec_tx_support(struct priv *priv)
 
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->txqs_n; ++i) {
-		struct txq *txq = (*priv->txqs)[i];
+		struct mlx5_txq_data *txq = (*priv->txqs)[i];
 
 		if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
 		    !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 3de3af8..6b4772c 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -350,7 +350,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		tmp.rx_nombuf += rxq->stats.rx_nombuf;
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
-		struct txq *txq = (*priv->txqs)[i];
+		struct mlx5_txq_data *txq = (*priv->txqs)[i];
 
 		if (txq == NULL)
 			continue;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1b45b4a..ee9f703 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -69,7 +69,7 @@
  *   Number of elements to allocate.
  */
 static void
-txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
+txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl, unsigned int elts_n)
 {
 	unsigned int i;
 
@@ -95,7 +95,7 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
  *   Pointer to TX queue structure.
  */
 static void
-txq_free_elts(struct txq_ctrl *txq_ctrl)
+txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 {
 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
 	const uint16_t elts_m = elts_n - 1;
@@ -132,7 +132,7 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
  *   Pointer to TX queue structure.
  */
 void
-txq_cleanup(struct txq_ctrl *txq_ctrl)
+mlx5_txq_cleanup(struct mlx5_txq_ctrl *txq_ctrl)
 {
 	size_t i;
 
@@ -162,7 +162,7 @@ txq_cleanup(struct txq_ctrl *txq_ctrl)
  *   0 on success, errno value on failure.
  */
 static inline int
-txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
+txq_setup(struct mlx5_txq_ctrl *tmpl, struct mlx5_txq_ctrl *txq_ctrl)
 {
 	struct mlx5dv_qp qp;
 	struct ibv_cq *ibcq = tmpl->cq;
@@ -225,12 +225,12 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
  *   0 on success, errno value on failure.
  */
 int
-txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
-	       uint16_t desc, unsigned int socket,
-	       const struct rte_eth_txconf *conf)
+mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl,
+		    uint16_t desc, unsigned int socket,
+		    const struct rte_eth_txconf *conf)
 {
 	struct priv *priv = mlx5_get_priv(dev);
-	struct txq_ctrl tmpl = {
+	struct mlx5_txq_ctrl tmpl = {
 		.priv = priv,
 		.socket = socket,
 	};
@@ -422,15 +422,15 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 	}
 	/* Clean up txq in case we're reinitializing it. */
 	DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
-	txq_cleanup(txq_ctrl);
+	mlx5_txq_cleanup(txq_ctrl);
 	*txq_ctrl = tmpl;
 	DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
 	/* Pre-register known mempools. */
-	rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
+	rte_mempool_walk(mlx5_txq_mp2mr_iter, txq_ctrl);
 	assert(ret == 0);
 	return 0;
 error:
-	txq_cleanup(&tmpl);
+	mlx5_txq_cleanup(&tmpl);
 	assert(ret > 0);
 	return ret;
 }
@@ -457,8 +457,9 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		    unsigned int socket, const struct rte_eth_txconf *conf)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct txq *txq = (*priv->txqs)[idx];
-	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq, struct mlx5_txq_ctrl, txq);
 	int ret;
 
 	if (mlx5_is_secondary())
@@ -494,7 +495,7 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			return -EEXIST;
 		}
 		(*priv->txqs)[idx] = NULL;
-		txq_cleanup(txq_ctrl);
+		mlx5_txq_cleanup(txq_ctrl);
 		/* Resize if txq size is changed. */
 		if (txq_ctrl->txq.elts_n != log2above(desc)) {
 			txq_ctrl = rte_realloc(txq_ctrl,
@@ -521,7 +522,7 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			return -ENOMEM;
 		}
 	}
-	ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
+	ret = mlx5_txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
 	if (ret)
 		rte_free(txq_ctrl);
 	else {
@@ -543,8 +544,8 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 void
 mlx5_tx_queue_release(void *dpdk_txq)
 {
-	struct txq *txq = (struct txq *)dpdk_txq;
-	struct txq_ctrl *txq_ctrl;
+	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
 	struct priv *priv;
 	unsigned int i;
 
@@ -553,7 +554,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
 
 	if (txq == NULL)
 		return;
-	txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
 	priv = txq_ctrl->priv;
 	priv_lock(priv);
 	for (i = 0; (i != priv->txqs_n); ++i)
@@ -563,7 +564,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
 			(*priv->txqs)[i] = NULL;
 			break;
 		}
-	txq_cleanup(txq_ctrl);
+	mlx5_txq_cleanup(txq_ctrl);
 	rte_free(txq_ctrl);
 	priv_unlock(priv);
 }
@@ -588,8 +589,8 @@ priv_tx_uar_remap(struct priv *priv, int fd)
 	unsigned int pages_n = 0;
 	uintptr_t uar_va;
 	void *addr;
-	struct txq *txq;
-	struct txq_ctrl *txq_ctrl;
+	struct mlx5_txq_data *txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
 	int already_mapped;
 	size_t page_size = sysconf(_SC_PAGESIZE);
 
@@ -600,7 +601,7 @@ priv_tx_uar_remap(struct priv *priv, int fd)
 	 */
 	for (i = 0; i != priv->txqs_n; ++i) {
 		txq = (*priv->txqs)[i];
-		txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
 		uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
 		uar_va = RTE_ALIGN_FLOOR(uar_va, page_size);
 		already_mapped = 0;
-- 
2.1.4


Thread overview: 129+ messages
2017-08-02 14:10 [dpdk-dev] [PATCH v1 00/21] net/mlx5: cleanup for isolated mode Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 01/21] net/mlx5: merge action and flow parser structure Nelio Laranjeiro
     [not found] ` <cover.1501681913.git.nelio.laranjeiro@6wind.com>
2017-08-02 14:10   ` [dpdk-dev] [PATCH v1] net/mlx5: support RSS hash configuration in generic flow action Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 02/21] net/mlx5: remove flow director support Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 03/21] net/mlx5: prefix Rx queue control structures Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 04/21] net/mlx5: prefix Tx control queue structures Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 05/21] net/mlx5: remove redundant started flag Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 06/21] net/mlx5: verify all flows are been removed on close Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 07/21] net/mlx5: add reference counter on memory region Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 08/21] net/mlx5: separate DPDK from Verbs Rx queue objects Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 09/21] net/mlx5: separate DPDK from Verbs Tx " Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 10/21] net/mlx5: add reference counter on DPDK Tx queues Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 11/21] net/mlx5: add reference counter on DPDK Rx queues Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 12/21] net/mlx5: remove queue drop support Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 13/21] net/mlx5: make indirection tables sharable Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 14/21] net/mlx5: add Hash Rx queue object Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 15/21] net/mlx5: disable priority protection in flows Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 16/21] net/mlx5: use flow to enable promiscuous mode Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 17/21] net/mlx5: use flow to enable all multi mode Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 18/21] net/mlx5: use flow to enable unicast traffic Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 19/21] net/mlx5: handle a single RSS hash key for all protocols Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 20/21] net/mlx5: remove hash Rx queues support Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 21/21] net/mlx5: support RSS hash configuration in generic flow action Nelio Laranjeiro
2017-08-18 13:44 ` [dpdk-dev] [PATCH v1 00/21] net/mlx5: cleanup for isolated mode Ferruh Yigit
2017-08-22  9:15   ` Nélio Laranjeiro
2017-10-05 12:49 ` [dpdk-dev] [PATCH v2 00/30] " Nelio Laranjeiro
2017-10-05 19:14   ` Ferruh Yigit
     [not found] ` <cover.1507207731.git.nelio.laranjeiro@6wind.com>
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 01/30] net/mlx5: merge action and flow parser structure Nelio Laranjeiro
2017-10-06  0:47     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 02/30] net/mlx5: remove flow director support Nelio Laranjeiro
2017-10-06  0:49     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 03/30] net/mlx5: prefix Rx structures and functions Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 04/30] net/mlx5: prefix Tx " Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 05/30] net/mlx5: remove redundant started flag Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 06/30] net/mlx5: verify all flows are been removed on close Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 07/30] net/mlx5: fix reta update can segfault Nelio Laranjeiro
2017-10-06  0:51     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 08/30] net/mlx5: fix rxqs vector support verification Nelio Laranjeiro
2017-10-06  0:51     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 09/30] net/mlx5: add reference counter on memory region Nelio Laranjeiro
2017-10-06  1:11     ` Yongseok Koh
2017-10-06  8:30       ` Nélio Laranjeiro
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 10/30] net/mlx5: separate DPDK from Verbs Rx queue objects Nelio Laranjeiro
2017-10-06  3:26     ` Yongseok Koh
2017-10-06  8:52       ` Nélio Laranjeiro
2017-10-06 22:57         ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 11/30] net/mlx5: separate DPDK from Verbs Tx " Nelio Laranjeiro
2017-10-06  3:32     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 12/30] net/mlx5: add reference counter on DPDK Tx queues Nelio Laranjeiro
2017-10-06  3:51     ` Yongseok Koh
2017-10-09 18:33     ` Ferruh Yigit
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 13/30] net/mlx5: add reference counter on DPDK Rx queues Nelio Laranjeiro
2017-10-06  3:56     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 14/30] net/mlx5: make indirection tables shareable Nelio Laranjeiro
2017-10-06  4:08     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 15/30] net/mlx5: add Hash Rx queue object Nelio Laranjeiro
2017-10-06  4:59     ` Yongseok Koh
2017-10-06  7:03       ` Nélio Laranjeiro
2017-10-06 22:50         ` Yongseok Koh
2017-10-09  8:05           ` Nélio Laranjeiro
2017-10-09 13:48             ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 16/30] net/mlx5: fix clang compilation error Nelio Laranjeiro
2017-10-06  5:01     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 17/30] net/mlx5: use flow to enable promiscuous mode Nelio Laranjeiro
2017-10-06  5:07     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 18/30] net/mlx5: use flow to enable all multi mode Nelio Laranjeiro
2017-10-06  5:10     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 19/30] net/mlx5: use flow to enable unicast traffic Nelio Laranjeiro
2017-10-06  5:18     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 20/30] net/mlx5: handle a single RSS hash key for all protocols Nelio Laranjeiro
2017-10-06  5:23     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 21/30] net/mlx5: remove hash Rx queues support Nelio Laranjeiro
2017-10-06  5:27     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 22/30] net/mlx5: fully convert a flow to verbs in validate Nelio Laranjeiro
2017-10-06  5:33     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 23/30] net/mlx5: process flows actions before of items Nelio Laranjeiro
2017-10-06  5:36     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 24/30] net/mlx5: merge internal parser and actions structures Nelio Laranjeiro
2017-10-06  5:37     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 25/30] net/mlx5: use a better name for the flow parser Nelio Laranjeiro
2017-10-06  5:41     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 26/30] net/mlx5: reorganise functions in the file Nelio Laranjeiro
2017-10-06  5:42     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 27/30] net/mlx5: move Verbs flows and attributes Nelio Laranjeiro
2017-10-06  5:44     ` Yongseok Koh
2017-10-05 12:50   ` [dpdk-dev] [PATCH v2 28/30] net/mlx5: handle RSS hash configuration in RSS flow Nelio Laranjeiro
2017-10-06 17:30     ` Yongseok Koh
2017-10-05 12:50   ` [dpdk-dev] [PATCH v2 29/30] net/mlx5: support flow director Nelio Laranjeiro
2017-10-06  5:46     ` Yongseok Koh
2017-10-05 12:50   ` [dpdk-dev] [PATCH v2 30/30] net/mlx5: add new operations for isolated mode Nelio Laranjeiro
2017-10-06  5:48     ` Yongseok Koh
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 00/30] net/mlx5: cleanup " Nelio Laranjeiro
2017-10-09 17:17   ` Yongseok Koh
2017-10-09 18:35     ` Ferruh Yigit
2017-10-10  6:55       ` Nélio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 01/30] net/mlx5: merge action and flow parser structure Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 02/30] net/mlx5: remove flow director support Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 03/30] net/mlx5: prefix Rx structures and functions Nelio Laranjeiro
2017-10-09 14:44 ` Nelio Laranjeiro [this message]
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 05/30] net/mlx5: remove redundant started flag Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 06/30] net/mlx5: verify all flows are been removed on close Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 07/30] net/mlx5: fix reta update can segfault Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 08/30] net/mlx5: fix rxqs vector support verification Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 09/30] net/mlx5: add reference counter on memory region Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 10/30] net/mlx5: separate DPDK from Verbs Rx queue objects Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 11/30] net/mlx5: separate DPDK from Verbs Tx " Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 12/30] net/mlx5: add reference counter on DPDK Tx queues Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 13/30] net/mlx5: add reference counter on DPDK Rx queues Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 14/30] net/mlx5: make indirection tables shareable Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 15/30] net/mlx5: add Hash Rx queue object Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 16/30] net/mlx5: fix clang compilation error Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 17/30] net/mlx5: use flow to enable promiscuous mode Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 18/30] net/mlx5: use flow to enable all multi mode Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 19/30] net/mlx5: use flow to enable unicast traffic Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 20/30] net/mlx5: handle a single RSS hash key for all protocols Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 21/30] net/mlx5: remove hash Rx queues support Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 22/30] net/mlx5: fully convert a flow to verbs in validate Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 23/30] net/mlx5: process flows actions before of items Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 24/30] net/mlx5: merge internal parser and actions structures Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 25/30] net/mlx5: use a better name for the flow parser Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 26/30] net/mlx5: reorganise functions in the file Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 27/30] net/mlx5: move Verbs flows and attributes Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 28/30] net/mlx5: handle RSS hash configuration in RSS flow Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 29/30] net/mlx5: support flow director Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 30/30] net/mlx5: add new operations for isolated mode Nelio Laranjeiro
