DPDK patches and discussions
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com, yskoh@mellanox.com, ferruh.yigit@intel.com
Subject: [dpdk-dev] [PATCH v3 14/30] net/mlx5: make indirection tables shareable
Date: Mon,  9 Oct 2017 16:44:50 +0200	[thread overview]
Message-ID: <f5bf2ae66e792daee97a1f32c681e68e4f5044c4.1507560012.git.nelio.laranjeiro@6wind.com> (raw)
In-Reply-To: <cover.1507560012.git.nelio.laranjeiro@6wind.com>

An indirection table on the Verbs side consists of a list of final work
queues used to spread packets according to a higher-level queue.  Such an
indirection table can be shared among the hash Rx queues that point to it.
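
For reference, the flow code now acquires its indirection table with a
plain lookup-or-create pattern.  The helper below is a hypothetical
wrapper, only meant to illustrate the calls introduced by this patch
(the real code open-codes this in priv_flow_create_action_queue() and
assumes the driver's internal headers):

	/* Illustrative sketch: acquire a (possibly shared) indirection
	 * table for a queue list; not part of this patch. */
	static struct mlx5_ind_table_ibv *
	flow_ind_table_acquire(struct priv *priv, uint16_t queues[],
			       uint16_t queues_n)
	{
		struct mlx5_ind_table_ibv *ind_tbl;

		/* Reuse an existing table matching this queue list. */
		ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
		if (!ind_tbl)
			/* Otherwise create one and add it to priv->ind_tbls. */
			ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues,
							      queues_n);
		return ind_tbl;
	}

The matching mlx5_priv_ind_table_ibv_release() drops the reference when
the flow is destroyed; the Verbs object and the structure are only freed
once the last reference is gone, otherwise EBUSY is returned and the
table remains available to its other users.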

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5.c       |   3 +
 drivers/net/mlx5/mlx5.h       |   2 +
 drivers/net/mlx5/mlx5_flow.c  |  83 ++++++++++-------------
 drivers/net/mlx5/mlx5_rxq.c   | 153 ++++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.h  |  17 +++++
 drivers/net/mlx5/mlx5_utils.h |   2 +
 6 files changed, 214 insertions(+), 46 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index c2c3d1b..46b4067 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -235,6 +235,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	if (priv->reta_idx != NULL)
 		rte_free(priv->reta_idx);
 	priv_socket_uninit(priv);
+	ret = mlx5_priv_ind_table_ibv_verify(priv);
+	if (ret)
+		WARN("%p: some Indirection table still remain", (void *)priv);
 	ret = mlx5_priv_rxq_ibv_verify(priv);
 	if (ret)
 		WARN("%p: some Verbs Rx queue still remain", (void *)priv);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index d0ef21a..ab17ce6 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -151,6 +151,8 @@ struct priv {
 	LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
+	/* Verbs Indirection tables. */
+	LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
 	rte_spinlock_t lock; /* Lock for control functions. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 362ec91..dc9adeb 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -90,7 +90,7 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
-	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+	struct mlx5_ind_table_ibv *ind_table; /**< Indirection table. */
 	struct ibv_qp *qp; /**< Verbs queue pair. */
 	struct ibv_flow *ibv_flow; /**< Verbs flow. */
 	struct ibv_wq *wq; /**< Verbs work queue. */
@@ -98,8 +98,6 @@ struct rte_flow {
 	uint32_t mark:1; /**< Set if the flow is marked. */
 	uint32_t drop:1; /**< Drop queue. */
 	uint64_t hash_fields; /**< Fields that participate in the hash. */
-	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< List of queues. */
-	uint16_t queues_n; /**< Number of queues in the list. */
 };
 
 /** Static initializer for items. */
@@ -1089,9 +1087,6 @@ priv_flow_create_action_queue(struct priv *priv,
 {
 	struct rte_flow *rte_flow;
 	unsigned int i;
-	unsigned int j;
-	const unsigned int wqs_n = 1 << log2above(flow->actions.queues_n);
-	struct ibv_wq *wqs[wqs_n];
 
 	assert(priv->pd);
 	assert(priv->ctx);
@@ -1102,36 +1097,29 @@ priv_flow_create_action_queue(struct priv *priv,
 				   NULL, "cannot allocate flow memory");
 		return NULL;
 	}
-	for (i = 0; i < flow->actions.queues_n; ++i) {
-		struct mlx5_rxq_ibv *rxq_ibv =
-			mlx5_priv_rxq_ibv_get(priv, flow->actions.queues[i]);
-
-		wqs[i] = rxq_ibv->wq;
-		rte_flow->queues[i] = flow->actions.queues[i];
-		++rte_flow->queues_n;
-		(*priv->rxqs)[flow->actions.queues[i]]->mark |=
-			flow->actions.mark;
-	}
-	/* finalise indirection table. */
-	for (j = 0; i < wqs_n; ++i, ++j) {
-		wqs[i] = wqs[j];
-		if (j == flow->actions.queues_n)
-			j = 0;
+	for (i = 0; i != flow->actions.queues_n; ++i) {
+		struct mlx5_rxq_data *q =
+			(*priv->rxqs)[flow->actions.queues[i]];
+
+		q->mark |= flow->actions.mark;
 	}
 	rte_flow->mark = flow->actions.mark;
 	rte_flow->ibv_attr = flow->ibv_attr;
 	rte_flow->hash_fields = flow->hash_fields;
-	rte_flow->ind_table = ibv_create_rwq_ind_table(
-		priv->ctx,
-		&(struct ibv_rwq_ind_table_init_attr){
-			.log_ind_tbl_size = log2above(flow->actions.queues_n),
-			.ind_tbl = wqs,
-			.comp_mask = 0,
-		});
+	rte_flow->ind_table =
+		mlx5_priv_ind_table_ibv_get(priv, flow->actions.queues,
+					    flow->actions.queues_n);
 	if (!rte_flow->ind_table) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "cannot allocate indirection table");
-		goto error;
+		rte_flow->ind_table =
+			mlx5_priv_ind_table_ibv_new(priv, flow->actions.queues,
+						    flow->actions.queues_n);
+		if (!rte_flow->ind_table) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL,
+					   "cannot allocate indirection table");
+			goto error;
+		}
 	}
 	rte_flow->qp = ibv_create_qp_ex(
 		priv->ctx,
@@ -1148,7 +1136,7 @@ priv_flow_create_action_queue(struct priv *priv,
 				.rx_hash_key = rss_hash_default_key,
 				.rx_hash_fields_mask = rte_flow->hash_fields,
 			},
-			.rwq_ind_tbl = rte_flow->ind_table,
+			.rwq_ind_tbl = rte_flow->ind_table->ind_table,
 			.pd = priv->pd
 		});
 	if (!rte_flow->qp) {
@@ -1171,7 +1159,7 @@ priv_flow_create_action_queue(struct priv *priv,
 	if (rte_flow->qp)
 		ibv_destroy_qp(rte_flow->qp);
 	if (rte_flow->ind_table)
-		ibv_destroy_rwq_ind_table(rte_flow->ind_table);
+		mlx5_priv_ind_table_ibv_release(priv, rte_flow->ind_table);
 	rte_free(rte_flow);
 	return NULL;
 }
@@ -1297,13 +1285,10 @@ priv_flow_destroy(struct priv *priv,
 		goto free;
 	if (flow->qp)
 		claim_zero(ibv_destroy_qp(flow->qp));
-	if (flow->ind_table)
-		claim_zero(ibv_destroy_rwq_ind_table(flow->ind_table));
-	for (i = 0; i != flow->queues_n; ++i) {
+	for (i = 0; i != flow->ind_table->queues_n; ++i) {
 		struct rte_flow *tmp;
-		struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[flow->queues[i]];
-		struct mlx5_rxq_ctrl *rxq_ctrl =
-			container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+		struct mlx5_rxq_data *rxq_data =
+			(*priv->rxqs)[flow->ind_table->queues[i]];
 
 		/*
 		 * To remove the mark from the queue, the queue must not be
@@ -1319,14 +1304,17 @@ priv_flow_destroy(struct priv *priv,
 					continue;
 				if (!tmp->mark)
 					continue;
-				for (j = 0; (j != tmp->queues_n) && !mark; j++)
-					if (tmp->queues[j] == flow->queues[i])
+				for (j = 0;
+				     (j != tmp->ind_table->queues_n) && !mark;
+				     j++)
+					if (tmp->ind_table->queues[j] ==
+					    flow->ind_table->queues[i])
 						mark = 1;
 			}
 			rxq_data->mark = mark;
 		}
-		mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
 	}
+	mlx5_priv_ind_table_ibv_release(priv, flow->ind_table);
 free:
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
@@ -1518,9 +1506,10 @@ priv_flow_stop(struct priv *priv)
 		flow->ibv_flow = NULL;
 		if (flow->mark) {
 			unsigned int n;
+			struct mlx5_ind_table_ibv *ind_tbl = flow->ind_table;
 
-			for (n = 0; n < flow->queues_n; ++n)
-				(*priv->rxqs)[flow->queues[n]]->mark = 0;
+			for (n = 0; n < ind_tbl->queues_n; ++n)
+				(*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
 		}
 		DEBUG("Flow %p removed", (void *)flow);
 	}
@@ -1562,8 +1551,10 @@ priv_flow_start(struct priv *priv)
 		if (flow->mark) {
 			unsigned int n;
 
-			for (n = 0; n < flow->queues_n; ++n)
-				(*priv->rxqs)[flow->queues[n]]->mark = 1;
+			for (n = 0; n < flow->ind_table->queues_n; ++n) {
+				uint16_t idx = flow->ind_table->queues[n];
+				(*priv->rxqs)[idx]->mark = 1;
+			}
 		}
 	}
 	return 0;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 87efeed..4a53282 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1622,3 +1622,156 @@ mlx5_priv_rxq_verify(struct priv *priv)
 	}
 	return ret;
 }
+
+/**
+ * Create an indirection table.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param queues
+ *   Queues entering in the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   A new indirection table.
+ */
+struct mlx5_ind_table_ibv*
+mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
+			    uint16_t queues_n)
+{
+	struct mlx5_ind_table_ibv *ind_tbl;
+	const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
+		log2above(queues_n) :
+		priv->ind_table_max_size;
+	struct ibv_wq *wq[1 << wq_n];
+	unsigned int i;
+	unsigned int j;
+
+	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
+			     queues_n * sizeof(uint16_t), 0);
+	if (!ind_tbl)
+		return NULL;
+	for (i = 0; i != queues_n; ++i) {
+		struct mlx5_rxq_ctrl *rxq =
+			mlx5_priv_rxq_get(priv, queues[i]);
+
+		if (!rxq)
+			goto error;
+		wq[i] = rxq->ibv->wq;
+		ind_tbl->queues[i] = queues[i];
+	}
+	ind_tbl->queues_n = queues_n;
+	/* Finalise indirection table. */
+	for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
+		wq[i] = wq[j];
+	ind_tbl->ind_table = ibv_create_rwq_ind_table(
+		priv->ctx,
+		&(struct ibv_rwq_ind_table_init_attr){
+			.log_ind_tbl_size = wq_n,
+			.ind_tbl = wq,
+			.comp_mask = 0,
+		});
+	if (!ind_tbl->ind_table)
+		goto error;
+	rte_atomic32_inc(&ind_tbl->refcnt);
+	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+	return ind_tbl;
+error:
+	rte_free(ind_tbl);
+	DEBUG("%p cannot create indirection table", (void *)priv);
+	return NULL;
+}
+
+/**
+ * Get an indirection table.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param queues
+ *   Queues entering in the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   An indirection table if found.
+ */
+struct mlx5_ind_table_ibv*
+mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
+			    uint16_t queues_n)
+{
+	struct mlx5_ind_table_ibv *ind_tbl;
+
+	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+		if ((ind_tbl->queues_n == queues_n) &&
+		    (memcmp(ind_tbl->queues, queues,
+			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
+		     == 0))
+			break;
+	}
+	if (ind_tbl) {
+		unsigned int i;
+
+		rte_atomic32_inc(&ind_tbl->refcnt);
+		DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+		      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+		for (i = 0; i != ind_tbl->queues_n; ++i)
+			mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);
+	}
+	return ind_tbl;
+}
+
+/**
+ * Release an indirection table.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param ind_table
+ *   Indirection table to release.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+int
+mlx5_priv_ind_table_ibv_release(struct priv *priv,
+				struct mlx5_ind_table_ibv *ind_tbl)
+{
+	unsigned int i;
+
+	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+		claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
+	for (i = 0; i != ind_tbl->queues_n; ++i)
+		claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
+	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+		LIST_REMOVE(ind_tbl, next);
+		rte_free(ind_tbl);
+		return 0;
+	}
+	return EBUSY;
+}
+
+/**
+ * Verify the Verbs indirection table list is empty.
+ *
+ * @param priv
+ *  Pointer to private structure.
+ *
+ * @return the number of object not released.
+ */
+int
+mlx5_priv_ind_table_ibv_verify(struct priv *priv)
+{
+	struct mlx5_ind_table_ibv *ind_tbl;
+	int ret = 0;
+
+	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+		DEBUG("%p: Verbs indirection table %p still referenced",
+		      (void *)priv, (void *)ind_tbl);
+		++ret;
+	}
+	return ret;
+}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 44cfef5..b7c75bf 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -156,6 +156,15 @@ struct mlx5_rxq_ctrl {
 	unsigned int irq:1; /* Whether IRQ is enabled. */
 };
 
+/* Indirection table. */
+struct mlx5_ind_table_ibv {
+	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
+	rte_atomic32_t refcnt; /* Reference counter. */
+	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+	uint16_t queues_n; /**< Number of queues in the list. */
+	uint16_t queues[]; /**< Queue list. */
+};
+
 /* Hash RX queue types. */
 enum hash_rxq_type {
 	HASH_RXQ_TCPV4,
@@ -345,6 +354,14 @@ int mlx5_priv_rxq_release(struct priv *, uint16_t);
 int mlx5_priv_rxq_releasable(struct priv *, uint16_t);
 int mlx5_priv_rxq_verify(struct priv *);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *);
+struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *,
+						       uint16_t [],
+						       uint16_t);
+struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *,
+						       uint16_t [],
+						       uint16_t);
+int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *);
+int mlx5_priv_ind_table_ibv_verify(struct priv *);
 
 /* mlx5_txq.c */
 
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index a824787..218ae83 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -128,11 +128,13 @@ pmd_drv_log_basename(const char *s)
 
 #define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
 #define claim_zero(...) assert((__VA_ARGS__) == 0)
+#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
 
 #else /* NDEBUG */
 
 #define DEBUG(...) (void)0
 #define claim_zero(...) (__VA_ARGS__)
+#define claim_nonzero(...) (__VA_ARGS__)
 
 #endif /* NDEBUG */
 
-- 
2.1.4


Thread overview: 129+ messages
2017-08-02 14:10 [dpdk-dev] [PATCH v1 00/21] net/mlx5: cleanup for isolated mode Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 01/21] net/mlx5: merge action and flow parser structure Nelio Laranjeiro
     [not found] ` <cover.1501681913.git.nelio.laranjeiro@6wind.com>
2017-08-02 14:10   ` [dpdk-dev] [PATCH v1] net/mlx5: support RSS hash configuration in generic flow action Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 02/21] net/mlx5: remove flow director support Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 03/21] net/mlx5: prefix Rx queue control structures Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 04/21] net/mlx5: prefix Tx control queue structures Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 05/21] net/mlx5: remove redundant started flag Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 06/21] net/mlx5: verify all flows are been removed on close Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 07/21] net/mlx5: add reference counter on memory region Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 08/21] net/mlx5: separate DPDK from Verbs Rx queue objects Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 09/21] net/mlx5: separate DPDK from Verbs Tx " Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 10/21] net/mlx5: add reference counter on DPDK Tx queues Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 11/21] net/mlx5: add reference counter on DPDK Rx queues Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 12/21] net/mlx5: remove queue drop support Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 13/21] net/mlx5: make indirection tables sharable Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 14/21] net/mlx5: add Hash Rx queue object Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 15/21] net/mlx5: disable priority protection in flows Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 16/21] net/mlx5: use flow to enable promiscuous mode Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 17/21] net/mlx5: use flow to enable all multi mode Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 18/21] net/mlx5: use flow to enable unicast traffic Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 19/21] net/mlx5: handle a single RSS hash key for all protocols Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 20/21] net/mlx5: remove hash Rx queues support Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 21/21] net/mlx5: support RSS hash configuration in generic flow action Nelio Laranjeiro
2017-08-18 13:44 ` [dpdk-dev] [PATCH v1 00/21] net/mlx5: cleanup for isolated mode Ferruh Yigit
2017-08-22  9:15   ` Nélio Laranjeiro
2017-10-05 12:49 ` [dpdk-dev] [PATCH v2 00/30] " Nelio Laranjeiro
2017-10-05 19:14   ` Ferruh Yigit
     [not found] ` <cover.1507207731.git.nelio.laranjeiro@6wind.com>
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 01/30] net/mlx5: merge action and flow parser structure Nelio Laranjeiro
2017-10-06  0:47     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 02/30] net/mlx5: remove flow director support Nelio Laranjeiro
2017-10-06  0:49     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 03/30] net/mlx5: prefix Rx structures and functions Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 04/30] net/mlx5: prefix Tx " Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 05/30] net/mlx5: remove redundant started flag Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 06/30] net/mlx5: verify all flows are been removed on close Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 07/30] net/mlx5: fix reta update can segfault Nelio Laranjeiro
2017-10-06  0:51     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 08/30] net/mlx5: fix rxqs vector support verification Nelio Laranjeiro
2017-10-06  0:51     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 09/30] net/mlx5: add reference counter on memory region Nelio Laranjeiro
2017-10-06  1:11     ` Yongseok Koh
2017-10-06  8:30       ` Nélio Laranjeiro
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 10/30] net/mlx5: separate DPDK from Verbs Rx queue objects Nelio Laranjeiro
2017-10-06  3:26     ` Yongseok Koh
2017-10-06  8:52       ` Nélio Laranjeiro
2017-10-06 22:57         ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 11/30] net/mlx5: separate DPDK from Verbs Tx " Nelio Laranjeiro
2017-10-06  3:32     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 12/30] net/mlx5: add reference counter on DPDK Tx queues Nelio Laranjeiro
2017-10-06  3:51     ` Yongseok Koh
2017-10-09 18:33     ` Ferruh Yigit
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 13/30] net/mlx5: add reference counter on DPDK Rx queues Nelio Laranjeiro
2017-10-06  3:56     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 14/30] net/mlx5: make indirection tables shareable Nelio Laranjeiro
2017-10-06  4:08     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 15/30] net/mlx5: add Hash Rx queue object Nelio Laranjeiro
2017-10-06  4:59     ` Yongseok Koh
2017-10-06  7:03       ` Nélio Laranjeiro
2017-10-06 22:50         ` Yongseok Koh
2017-10-09  8:05           ` Nélio Laranjeiro
2017-10-09 13:48             ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 16/30] net/mlx5: fix clang compilation error Nelio Laranjeiro
2017-10-06  5:01     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 17/30] net/mlx5: use flow to enable promiscuous mode Nelio Laranjeiro
2017-10-06  5:07     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 18/30] net/mlx5: use flow to enable all multi mode Nelio Laranjeiro
2017-10-06  5:10     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 19/30] net/mlx5: use flow to enable unicast traffic Nelio Laranjeiro
2017-10-06  5:18     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 20/30] net/mlx5: handle a single RSS hash key for all protocols Nelio Laranjeiro
2017-10-06  5:23     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 21/30] net/mlx5: remove hash Rx queues support Nelio Laranjeiro
2017-10-06  5:27     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 22/30] net/mlx5: fully convert a flow to verbs in validate Nelio Laranjeiro
2017-10-06  5:33     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 23/30] net/mlx5: process flows actions before of items Nelio Laranjeiro
2017-10-06  5:36     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 24/30] net/mlx5: merge internal parser and actions structures Nelio Laranjeiro
2017-10-06  5:37     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 25/30] net/mlx5: use a better name for the flow parser Nelio Laranjeiro
2017-10-06  5:41     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 26/30] net/mlx5: reorganise functions in the file Nelio Laranjeiro
2017-10-06  5:42     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 27/30] net/mlx5: move Verbs flows and attributes Nelio Laranjeiro
2017-10-06  5:44     ` Yongseok Koh
2017-10-05 12:50   ` [dpdk-dev] [PATCH v2 28/30] net/mlx5: handle RSS hash configuration in RSS flow Nelio Laranjeiro
2017-10-06 17:30     ` Yongseok Koh
2017-10-05 12:50   ` [dpdk-dev] [PATCH v2 29/30] net/mlx5: support flow director Nelio Laranjeiro
2017-10-06  5:46     ` Yongseok Koh
2017-10-05 12:50   ` [dpdk-dev] [PATCH v2 30/30] net/mlx5: add new operations for isolated mode Nelio Laranjeiro
2017-10-06  5:48     ` Yongseok Koh
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 00/30] net/mlx5: cleanup " Nelio Laranjeiro
2017-10-09 17:17   ` Yongseok Koh
2017-10-09 18:35     ` Ferruh Yigit
2017-10-10  6:55       ` Nélio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 01/30] net/mlx5: merge action and flow parser structure Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 02/30] net/mlx5: remove flow director support Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 03/30] net/mlx5: prefix Rx structures and functions Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 04/30] net/mlx5: prefix Tx " Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 05/30] net/mlx5: remove redundant started flag Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 06/30] net/mlx5: verify all flows are been removed on close Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 07/30] net/mlx5: fix reta update can segfault Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 08/30] net/mlx5: fix rxqs vector support verification Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 09/30] net/mlx5: add reference counter on memory region Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 10/30] net/mlx5: separate DPDK from Verbs Rx queue objects Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 11/30] net/mlx5: separate DPDK from Verbs Tx " Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 12/30] net/mlx5: add reference counter on DPDK Tx queues Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 13/30] net/mlx5: add reference counter on DPDK Rx queues Nelio Laranjeiro
2017-10-09 14:44 ` Nelio Laranjeiro [this message]
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 15/30] net/mlx5: add Hash Rx queue object Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 16/30] net/mlx5: fix clang compilation error Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 17/30] net/mlx5: use flow to enable promiscuous mode Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 18/30] net/mlx5: use flow to enable all multi mode Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 19/30] net/mlx5: use flow to enable unicast traffic Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 20/30] net/mlx5: handle a single RSS hash key for all protocols Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 21/30] net/mlx5: remove hash Rx queues support Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 22/30] net/mlx5: fully convert a flow to verbs in validate Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 23/30] net/mlx5: process flows actions before of items Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 24/30] net/mlx5: merge internal parser and actions structures Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 25/30] net/mlx5: use a better name for the flow parser Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 26/30] net/mlx5: reorganise functions in the file Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 27/30] net/mlx5: move Verbs flows and attributes Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 28/30] net/mlx5: handle RSS hash configuration in RSS flow Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 29/30] net/mlx5: support flow director Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 30/30] net/mlx5: add new operations for isolated mode Nelio Laranjeiro
