From: Yongseok Koh <yskoh@mellanox.com>
To: shahafs@mellanox.com
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v5 2/4] net/mlx5: remove redundant queue index
Date: Wed, 10 Apr 2019 11:41:16 -0700
Message-ID: <20190410184118.39727-3-yskoh@mellanox.com>
In-Reply-To: <20190410184118.39727-1-yskoh@mellanox.com>

The queue index is redundantly stored in both the Rx and Tx structures,
e.g. txq_ctrl->idx and txq->stats.idx. Both copies are consolidated into
a single field - rxq->idx and txq->idx.
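
One practical consequence shows up in the mlx5_stats.c hunk below: with
the index gone from the stats structs, a stats reset no longer has to
save and restore it and can simply zero the whole struct. A minimal
sketch of the idea (simplified, hypothetical types, not the driver's
real definitions):

    #include <stdint.h>
    #include <string.h>

    /* Stats struct now holds counters only; no idx to preserve. */
    struct rxq_stats {
            uint64_t ipackets; /* successfully received packets */
            uint64_t ibytes;   /* successfully received bytes */
    };

    static void rxq_stats_reset(struct rxq_stats *s)
    {
            /* Nothing in the struct must survive a reset. */
            memset(s, 0, sizeof(*s));
    }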

Also, rxq and txq are moved to the beginning of their control
structures (rxq_ctrl and txq_ctrl) for cacheline alignment.
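
For illustration, the layout intent can be stated as a compile-time
check. This is a hedged sketch with simplified, hypothetical types;
cacheline alignment of the hot fields additionally assumes the control
struct itself is allocated on a cacheline boundary:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct rxq_data {
            uint16_t idx; /* queue index: the single copy */
            /* ... hot data path fields ... */
    };

    struct rxq_ctrl {
            struct rxq_data rxq; /* hot data path struct at offset 0 */
            void *next;          /* cold control-only fields follow */
            unsigned int socket;
    };

    /* As the first member, rxq shares the control struct's start
     * address, so hot fields are not pushed across a cacheline
     * boundary by cold control fields. */
    static_assert(offsetof(struct rxq_ctrl, rxq) == 0, "rxq must be first");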

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxq.c     | 29 ++++++++++++++---------------
 drivers/net/mlx5/mlx5_rxtx.h    | 10 ++++------
 drivers/net/mlx5/mlx5_stats.c   | 15 ++++++---------
 drivers/net/mlx5/mlx5_trigger.c |  2 +-
 drivers/net/mlx5/mlx5_txq.c     | 21 ++++++++++-----------
 5 files changed, 35 insertions(+), 42 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dcb97c2100..8a84b0a1b5 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -156,7 +156,7 @@ rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	}
 	DRV_LOG(DEBUG,
 		"port %u Rx queue %u allocated and configured %u segments",
-		rxq->port_id, rxq_ctrl->idx, wqe_n);
+		rxq->port_id, rxq->idx, wqe_n);
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
@@ -168,7 +168,7 @@ rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 		(*rxq->mprq_bufs)[i] = NULL;
 	}
 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-		rxq->port_id, rxq_ctrl->idx);
+		rxq->port_id, rxq->idx);
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -241,7 +241,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	DRV_LOG(DEBUG,
 		"port %u Rx queue %u allocated and configured %u segments"
 		" (max %u packets)",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
 		elts_n / (1 << rxq_ctrl->rxq.sges_n));
 	return 0;
 error:
@@ -253,7 +253,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 		(*rxq_ctrl->rxq.elts)[i] = NULL;
 	}
 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -287,7 +287,7 @@ rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	uint16_t i;
 
 	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
-		rxq->port_id, rxq_ctrl->idx);
+		rxq->port_id, rxq->idx);
 	if (rxq->mprq_bufs == NULL)
 		return;
 	assert(mlx5_rxq_check_vec_support(rxq) < 0);
@@ -318,7 +318,7 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	uint16_t i;
 
 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq->idx);
 	if (rxq->elts == NULL)
 		return;
 	/**
@@ -364,7 +364,7 @@ void
 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
 	if (rxq_ctrl->ibv)
 		mlx5_rxq_ibv_release(rxq_ctrl->ibv);
 	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -495,11 +495,11 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 		return;
 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	priv = rxq_ctrl->priv;
-	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
+	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
 		rte_panic("port %u Rx queue %u is still used by a flow and"
 			  " cannot be removed\n",
-			  PORT_ID(priv), rxq_ctrl->idx);
-	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
+			  PORT_ID(priv), rxq->idx);
+	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
 }
 
 /**
@@ -793,7 +793,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	if (!tmpl) {
 		DRV_LOG(ERR,
 			"port %u Rx queue %u cannot allocate verbs resources",
-			dev->data->port_id, rxq_ctrl->idx);
+			dev->data->port_id, rxq_data->idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
@@ -1104,7 +1104,7 @@ mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
 
 	LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
 		DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
-			dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
+			dev->data->port_id, rxq_ibv->rxq_ctrl->rxq.idx);
 		++ret;
 	}
 	return ret;
@@ -1470,7 +1470,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
 	tmpl->rxq.mp = mp;
-	tmpl->rxq.stats.idx = idx;
 	tmpl->rxq.elts_n = log2above(desc);
 	tmpl->rxq.rq_repl_thresh =
 		MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
@@ -1479,7 +1478,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 #ifndef RTE_ARCH_64
 	tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
 #endif
-	tmpl->idx = idx;
+	tmpl->rxq.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
@@ -1592,7 +1591,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
 
 	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
 		DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
-			dev->data->port_id, rxq_ctrl->idx);
+			dev->data->port_id, rxq_ctrl->rxq.idx);
 		++ret;
 	}
 	return ret;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index ced9945888..7b58063ceb 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -41,7 +41,6 @@
 #define MLX5_FLOW_TUNNEL 5
 
 struct mlx5_rxq_stats {
-	unsigned int idx; /**< Mapping index. */
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint64_t ipackets; /**< Total of successfully received packets. */
 	uint64_t ibytes; /**< Total of successfully received bytes. */
@@ -51,7 +50,6 @@ struct mlx5_rxq_stats {
 };
 
 struct mlx5_txq_stats {
-	unsigned int idx; /**< Mapping index. */
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint64_t opackets; /**< Total of successfully sent packets. */
 	uint64_t obytes; /**< Total of successfully sent bytes. */
@@ -116,6 +114,7 @@ struct mlx5_rxq_data {
 	struct rte_mempool *mp;
 	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
 	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
+	uint16_t idx; /* Queue index. */
 	struct mlx5_rxq_stats stats;
 	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
@@ -141,14 +140,13 @@ struct mlx5_rxq_ibv {
 
 /* RX queue control descriptor. */
 struct mlx5_rxq_ctrl {
+	struct mlx5_rxq_data rxq; /* Data path structure. */
 	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
-	struct mlx5_rxq_data rxq; /* Data path structure. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	unsigned int irq:1; /* Whether IRQ is enabled. */
-	uint16_t idx; /* Queue index. */
 	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 };
@@ -205,6 +203,7 @@ struct mlx5_txq_data {
 	volatile uint32_t *cq_db; /* Completion queue doorbell. */
 	volatile void *bf_reg; /* Blueflame register remapped. */
 	struct rte_mbuf *(*elts)[]; /* TX elements. */
+	uint16_t idx; /* Queue index. */
 	struct mlx5_txq_stats stats; /* TX queue counters. */
 #ifndef RTE_ARCH_64
 	rte_spinlock_t *uar_lock;
@@ -223,6 +222,7 @@ struct mlx5_txq_ibv {
 
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
+	struct mlx5_txq_data txq; /* Data path structure. */
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
@@ -230,10 +230,8 @@ struct mlx5_txq_ctrl {
 	unsigned int max_tso_header; /* Max TSO header size. */
 	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
-	struct mlx5_txq_data txq; /* Data path structure. */
 	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
 	volatile void *bf_reg_orig; /* Blueflame register from verbs. */
-	uint16_t idx; /* Queue index. */
 };
 
 /* mlx5_rxq.c */
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 5af199d0d5..ed50667f45 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -386,7 +386,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 		if (rxq == NULL)
 			continue;
-		idx = rxq->stats.idx;
+		idx = rxq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			tmp.q_ipackets[idx] += rxq->stats.ipackets;
@@ -407,7 +407,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 		if (txq == NULL)
 			continue;
-		idx = txq->stats.idx;
+		idx = txq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			tmp.q_opackets[idx] += txq->stats.opackets;
@@ -442,21 +442,18 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl;
 	unsigned int i;
-	unsigned int idx;
 
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		if ((*priv->rxqs)[i] == NULL)
 			continue;
-		idx = (*priv->rxqs)[i]->stats.idx;
-		(*priv->rxqs)[i]->stats =
-			(struct mlx5_rxq_stats){ .idx = idx };
+		memset(&(*priv->rxqs)[i]->stats, 0,
+		       sizeof(struct mlx5_rxq_stats));
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
 		if ((*priv->txqs)[i] == NULL)
 			continue;
-		idx = (*priv->txqs)[i]->stats.idx;
-		(*priv->txqs)[i]->stats =
-			(struct mlx5_txq_stats){ .idx = idx };
+		memset(&(*priv->txqs)[i]->stats, 0,
+		       sizeof(struct mlx5_txq_stats));
 	}
 	mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
 #ifndef MLX5_PMD_SOFT_COUNTERS
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 5b73f0ff03..7c1e5594d6 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -123,7 +123,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 		DRV_LOG(DEBUG,
 			"port %u Rx queue %u registering"
 			" mp %s having %u chunks",
-			dev->data->port_id, rxq_ctrl->idx,
+			dev->data->port_id, rxq_ctrl->rxq.idx,
 			mp->name, mp->nb_mem_chunks);
 		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
 		ret = rxq_alloc_elts(rxq_ctrl);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1b3d89f2f6..4bd08cb035 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -48,7 +48,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	for (i = 0; (i != elts_n); ++i)
 		(*txq_ctrl->txq.elts)[i] = NULL;
 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
-		PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -70,7 +70,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
 
 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
-		PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -224,7 +224,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
 		if ((*priv->txqs)[i] == txq) {
 			mlx5_txq_release(ETH_DEV(priv), i);
 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
-				PORT_ID(priv), txq_ctrl->idx);
+				PORT_ID(priv), txq->idx);
 			break;
 		}
 }
@@ -273,7 +273,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 			continue;
 		txq = (*priv->txqs)[i];
 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
-		assert(txq_ctrl->idx == (uint16_t)i);
+		assert(txq->idx == (uint16_t)i);
 		/* UAR addr form verbs used to find dup and offset in page. */
 		uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
 		off = uar_va & (page_size - 1); /* offset in page. */
@@ -301,7 +301,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 				DRV_LOG(ERR,
 					"port %u call to mmap failed on UAR"
 					" for txq %u",
-					dev->data->port_id, txq_ctrl->idx);
+					dev->data->port_id, txq->idx);
 				rte_errno = ENXIO;
 				return -rte_errno;
 			}
@@ -629,7 +629,7 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
 
 	LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
-			dev->data->port_id, txq_ibv->txq_ctrl->idx);
+			dev->data->port_id, txq_ibv->txq_ctrl->txq.idx);
 		++ret;
 	}
 	return ret;
@@ -778,7 +778,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
-	tmpl->idx = idx;
+	tmpl->txq.idx = idx;
 	txq_set_params(tmpl);
 	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
@@ -786,7 +786,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
 	tmpl->txq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
-	tmpl->txq.stats.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
@@ -893,12 +892,12 @@ int
 mlx5_txq_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_txq_ctrl *txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
 	int ret = 0;
 
-	LIST_FOREACH(txq, &priv->txqsctrl, next) {
+	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
-			dev->data->port_id, txq->idx);
+			dev->data->port_id, txq_ctrl->txq.idx);
 		++ret;
 	}
 	return ret;
-- 
2.11.0


Thread overview: 66+ messages

2019-03-25 19:36 [dpdk-dev] [PATCH 0/3] net/mlx: remove device register remap Yongseok Koh
2019-03-25 19:36 ` [dpdk-dev] [PATCH 1/3] net/mlx5: fix recursive inclusion of header file Yongseok Koh
2019-03-25 19:36 ` [dpdk-dev] [PATCH 2/3] net/mlx5: remove device register remap Yongseok Koh
2019-03-25 19:36 ` [dpdk-dev] [PATCH 3/3] net/mlx4: " Yongseok Koh
2019-04-01 21:22 ` [dpdk-dev] [PATCH v2 0/3] net/mlx: " Yongseok Koh
2019-04-01 21:22   ` [dpdk-dev] [PATCH v2 1/3] net/mlx5: fix recursive inclusion of header file Yongseok Koh
2019-04-02  5:39     ` Shahaf Shuler
2019-04-01 21:22   ` [dpdk-dev] [PATCH v2 2/3] net/mlx5: remove device register remap Yongseok Koh
2019-04-02  6:50     ` Shahaf Shuler
2019-04-01 21:22   ` [dpdk-dev] [PATCH v2 3/3] net/mlx4: " Yongseok Koh
2019-04-05  1:33 ` [dpdk-dev] [PATCH v3 0/4] net/mlx: " Yongseok Koh
2019-04-05  1:33   ` [dpdk-dev] [PATCH v3 1/4] net/mlx5: fix recursive inclusion of header file Yongseok Koh
2019-04-05  1:33   ` [dpdk-dev] [PATCH v3 2/4] net/mlx5: remove redundant queue index Yongseok Koh
2019-04-08  5:24     ` Shahaf Shuler
2019-04-05  1:33   ` [dpdk-dev] [PATCH v3 3/4] net/mlx5: remove device register remap Yongseok Koh
2019-04-08  5:48     ` Shahaf Shuler
2019-04-09 19:36       ` Yongseok Koh
2019-04-05  1:33   ` [dpdk-dev] [PATCH v3 4/4] net/mlx4: " Yongseok Koh
2019-04-09 23:13 ` [dpdk-dev] [PATCH v4 0/4] net/mlx: " Yongseok Koh
2019-04-09 23:13   ` [dpdk-dev] [PATCH v4 1/4] net/mlx5: fix recursive inclusion of header file Yongseok Koh
2019-04-09 23:13   ` [dpdk-dev] [PATCH v4 2/4] net/mlx5: remove redundant queue index Yongseok Koh
2019-04-09 23:13   ` [dpdk-dev] [PATCH v4 3/4] net/mlx5: remove device register remap Yongseok Koh
2019-04-10 17:50     ` Ferruh Yigit
2019-04-10 19:12       ` Yongseok Koh
2019-04-09 23:13   ` [dpdk-dev] [PATCH v4 4/4] net/mlx4: " Yongseok Koh
2019-04-10  6:58   ` [dpdk-dev] [PATCH v4 0/4] net/mlx: " Shahaf Shuler
2019-04-10 17:50     ` Ferruh Yigit
2019-04-10 18:41 ` [dpdk-dev] [PATCH v5 " Yongseok Koh
2019-04-10 18:41   ` [dpdk-dev] [PATCH v5 1/4] net/mlx5: fix recursive inclusion of header file Yongseok Koh
2019-04-10 18:41   ` [dpdk-dev] [PATCH v5 2/4] net/mlx5: remove redundant queue index Yongseok Koh [this message]
2019-04-10 18:41   ` [dpdk-dev] [PATCH v5 3/4] net/mlx5: remove device register remap Yongseok Koh
2019-04-10 18:41   ` [dpdk-dev] [PATCH v5 4/4] net/mlx4: " Yongseok Koh
2019-04-11  8:40   ` [dpdk-dev] [PATCH v5 0/4] net/mlx: " Shahaf Shuler
