DPDK patches and discussions
From: Suanming Mou <suanmingm@mellanox.com>
To: viacheslavo@mellanox.com, matan@mellanox.com
Cc: orika@mellanox.com, wentaoc@mellanox.com, rasland@mellanox.com,
	dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 03/10] net/mlx5: reorganize the mlx5 flow handle struct
Date: Thu, 16 Apr 2020 16:34:24 +0800	[thread overview]
Message-ID: <1587026071-422636-4-git-send-email-suanmingm@mellanox.com> (raw)
In-Reply-To: <1587026071-422636-1-git-send-email-suanmingm@mellanox.com>

Currently, the mlx5_flow_handle struct is not fully aligned and some bits
are wasted. Its members can be reorganized and optimized to save memory.

1. As metadata and meter share the same flow match id, the flow id is
limited to 24 bits because the 8 MSBs are used for the meter color.
Pack the flow id together with the other bit-field members into a single
32-bit word to save memory in the mlx5 flow handle.

2. The vf_vlan member of struct mlx5_flow_handle_dv was already moved to
struct mlx5_flow_handle. Remove the legacy vf_vlan from struct
mlx5_flow_handle_dv.

3. Move the vf_vlan member in mlx5_flow_handle next to the SILIST_ENTRY
member so it is aligned on 8 bytes.

4. Move the modify header member in mlx5_flow_handle_dv next to the
ILIST_ENTRY member so it is aligned on 8 bytes.

5. Introduce the __rte_packed attribute to keep the structs tightly packed.

In total, this saves 20 bytes of memory in the mlx5_flow_handle struct.
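
As a rough illustration only (this is not part of the patch; the member
names mirror the diff below but the surrounding types are simplified
stand-ins), folding the sub-flow id into the same 32-bit word as the
existing flag bits and packing the struct removes both the extra 32-bit
slot and the tail padding. In DPDK, __rte_packed expands to the
compiler's packed attribute.

#include <stdint.h>
#include <stdio.h>

/* Simplified "before" layout: the sub-flow id occupies its own 32-bit
 * word, and the 1+3 flag bits sit in a second, mostly empty word.
 */
struct handle_old {
	uint32_t next;        /* SILIST_ENTRY(uint32_t) stand-in */
	uint64_t layers;
	void *ib_flow;
	uint32_t qrss_id;     /* was union { qrss_id; mtr_flow_id; } */
	uint32_t mark:1;
	uint32_t fate_action:3;
	uint32_t rix_fate;    /* union of fate resource indexes */
};

/* Simplified "after" layout: the id shares one 32-bit word with the
 * flag bits and the whole struct is packed.
 */
struct handle_new {
	uint32_t next;
	uint64_t layers;
	void *ib_flow;
	uint32_t split_flow_id:28;
	uint32_t mark:1;
	uint32_t fate_action:3;
	uint32_t rix_fate;
} __attribute__((__packed__));  /* i.e. __rte_packed */

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct handle_old), sizeof(struct handle_new));
	return 0;
}

Running the sketch on a 64-bit target prints a smaller size for the
packed layout; the exact 20-byte figure above comes from the full
reorganization in the diff below, not from this simplified layout.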

For the resource objects that have been converted to indexed, align the
member names with the rix_ prefix.

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c       |  8 ++---
 drivers/net/mlx5/mlx5_flow.h       | 29 +++++++----------
 drivers/net/mlx5/mlx5_flow_dv.c    | 66 ++++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 18 +++++------
 4 files changed, 59 insertions(+), 62 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ffc2910..4205f23 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2323,8 +2323,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
 		       handle_idx, dev_handle, next)
-		if (dev_handle->qrss_id)
-			flow_qrss_free_id(dev, dev_handle->qrss_id);
+		if (dev_handle->split_flow_id)
+			flow_qrss_free_id(dev, dev_handle->split_flow_id);
 }
 
 static int
@@ -3988,7 +3988,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * reallocation becomes possible (for example, for
 			 * other flows in other threads).
 			 */
-			dev_flow->handle->qrss_id = qrss_id;
+			dev_flow->handle->split_flow_id = qrss_id;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
 			if (ret < 0)
@@ -4101,7 +4101,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->handle->mtr_flow_id = mtr_tag_id;
+		dev_flow->handle->split_flow_id = mtr_tag_id;
 		/* Setting the sfx group atrr. */
 		sfx_attr.group = sfx_attr.transfer ?
 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 67f767b..e78de85 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -494,44 +494,39 @@ struct mlx5_flow_rss {
 struct mlx5_flow_handle_dv {
 	/* Flow DV api: */
 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
-	uint32_t encap_decap;
-	/**< Index to encap/decap resource in cache. */
 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
 	/**< Pointer to modify header resource in cache. */
-	struct mlx5_vf_vlan vf_vlan;
-	/**< Structure for VF VLAN workaround. */
-	uint32_t push_vlan_res;
+	uint32_t rix_encap_decap;
+	/**< Index to encap/decap resource in cache. */
+	uint32_t rix_push_vlan;
 	/**< Index to push VLAN action resource in cache. */
-	uint32_t tag_resource;
+	uint32_t rix_tag;
 	/**< Index to the tag action. */
-};
+} __rte_packed;
 
 /** Device flow handle structure: used both for creating & destroying. */
 struct mlx5_flow_handle {
 	SILIST_ENTRY(uint32_t)next;
+	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
 	/**< Index to next device flow handle. */
 	uint64_t layers;
 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
 	void *ib_flow; /**< Verbs flow pointer. */
-	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
-	union {
-		uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
-		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
-	};
+	uint32_t split_flow_id:28; /**< Sub flow unique match flow id. */
 	uint32_t mark:1; /**< Metadate rxq mark flag. */
 	uint32_t fate_action:3; /**< Fate action type. */
 	union {
-		uint32_t hrxq; /**< Hash Rx queue object index. */
-		uint32_t jump; /**< Index to the jump action resource. */
-		uint32_t port_id_action;
+		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
+		uint32_t rix_jump; /**< Index to the jump action resource. */
+		uint32_t rix_port_id_action;
 		/**< Index to port ID action resource. */
-		uint32_t fate_idx;
+		uint32_t rix_fate;
 		/**< Generic value indicates the fate action. */
 	};
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_flow_handle_dv dvh;
 #endif
-};
+} __rte_packed;
 
 /*
  * Size for Verbs device flow handle structure only. Do not use the DV only
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index e28f01d..a8e92f2 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2490,14 +2490,14 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle->dvh.encap_decap = idx;
+			dev_flow->handle->dvh.rix_encap_decap = idx;
 			dev_flow->dv.encap_decap = cache_resource;
 			return 0;
 		}
 	}
 	/* Register new encap/decap resource. */
 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
-				       &dev_flow->handle->dvh.encap_decap);
+				       &dev_flow->handle->dvh.rix_encap_decap);
 	if (!cache_resource)
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -2518,7 +2518,8 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
-		     dev_flow->handle->dvh.encap_decap, cache_resource, next);
+		     dev_flow->handle->dvh.rix_encap_decap, cache_resource,
+		     next);
 	dev_flow->dv.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
@@ -2572,7 +2573,7 @@ struct field_modify_info modify_tcp[] = {
 			(void *)&tbl_data->jump, cnt);
 	}
 	rte_atomic32_inc(&tbl_data->jump.refcnt);
-	dev_flow->handle->jump = tbl_data->idx;
+	dev_flow->handle->rix_jump = tbl_data->idx;
 	dev_flow->dv.jump = &tbl_data->jump;
 	return 0;
 }
@@ -2613,14 +2614,14 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle->port_id_action = idx;
+			dev_flow->handle->rix_port_id_action = idx;
 			dev_flow->dv.port_id_action = cache_resource;
 			return 0;
 		}
 	}
 	/* Register new port id action resource. */
 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
-				       &dev_flow->handle->port_id_action);
+				       &dev_flow->handle->rix_port_id_action);
 	if (!cache_resource)
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -2643,7 +2644,8 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
-		     dev_flow->handle->port_id_action, cache_resource, next);
+		     dev_flow->handle->rix_port_id_action, cache_resource,
+		     next);
 	dev_flow->dv.port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
@@ -2689,14 +2691,14 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle->dvh.push_vlan_res = idx;
+			dev_flow->handle->dvh.rix_push_vlan = idx;
 			dev_flow->dv.push_vlan_res = cache_resource;
 			return 0;
 		}
 	}
 	/* Register new push_vlan action resource. */
 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
-				       &dev_flow->handle->dvh.push_vlan_res);
+				       &dev_flow->handle->dvh.rix_push_vlan);
 	if (!cache_resource)
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -2721,7 +2723,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_inc(&cache_resource->refcnt);
 	ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
 		     &sh->push_vlan_action_list,
-		     dev_flow->handle->dvh.push_vlan_res,
+		     dev_flow->handle->dvh.rix_push_vlan,
 		     cache_resource, next);
 	dev_flow->dv.push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
@@ -7103,7 +7105,7 @@ struct field_modify_info modify_tcp[] = {
 		cache_resource = container_of
 			(entry, struct mlx5_flow_dv_tag_resource, entry);
 		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->handle->dvh.tag_resource = cache_resource->idx;
+		dev_flow->handle->dvh.rix_tag = cache_resource->idx;
 		dev_flow->dv.tag_resource = cache_resource;
 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
 			(void *)cache_resource,
@@ -7112,7 +7114,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 	/* Register new resource. */
 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
-				       &dev_flow->handle->dvh.tag_resource);
+				       &dev_flow->handle->dvh.rix_tag);
 	if (!cache_resource)
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -7441,7 +7443,7 @@ struct field_modify_info modify_tcp[] = {
 			if (flow_dv_port_id_action_resource_register
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
-			MLX5_ASSERT(!handle->port_id_action);
+			MLX5_ASSERT(!handle->rix_port_id_action);
 			dev_flow->dv.actions[actions_n++] =
 					dev_flow->dv.port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
@@ -7468,7 +7470,7 @@ struct field_modify_info modify_tcp[] = {
 			 * right now. So the pointer to the tag resource must be
 			 * zero before the register process.
 			 */
-			MLX5_ASSERT(!handle->dvh.tag_resource);
+			MLX5_ASSERT(!handle->dvh.rix_tag);
 			if (flow_dv_tag_resource_register(dev, tag_be,
 							  dev_flow, error))
 				return -rte_errno;
@@ -7497,7 +7499,7 @@ struct field_modify_info modify_tcp[] = {
 			tag_be = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			MLX5_ASSERT(!handle->dvh.tag_resource);
+			MLX5_ASSERT(!handle->dvh.rix_tag);
 			if (flow_dv_tag_resource_register(dev, tag_be,
 							  dev_flow, error))
 				return -rte_errno;
@@ -8127,7 +8129,7 @@ struct field_modify_info modify_tcp[] = {
 				 * the special index to hrxq to mark the queue
 				 * has been allocated.
 				 */
-				dh->hrxq = UINT32_MAX;
+				dh->rix_hrxq = UINT32_MAX;
 				dv->actions[n++] = drop_hrxq->action;
 			}
 		} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
@@ -8159,7 +8161,7 @@ struct field_modify_info modify_tcp[] = {
 					 "cannot get hash queue");
 				goto error;
 			}
-			dh->hrxq = hrxq_idx;
+			dh->rix_hrxq = hrxq_idx;
 			dv->actions[n++] = hrxq->action;
 		}
 		dh->ib_flow =
@@ -8190,13 +8192,13 @@ struct field_modify_info modify_tcp[] = {
 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
 		       handle_idx, dh, next) {
 		/* hrxq is union, don't clear it if the flag is not set. */
-		if (dh->hrxq) {
+		if (dh->rix_hrxq) {
 			if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
 				mlx5_hrxq_drop_release(dev);
-				dh->hrxq = 0;
+				dh->rix_hrxq = 0;
 			} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
-				mlx5_hrxq_release(dev, dh->hrxq);
-				dh->hrxq = 0;
+				mlx5_hrxq_release(dev, dh->rix_hrxq);
+				dh->rix_hrxq = 0;
 			}
 		}
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
@@ -8257,7 +8259,7 @@ struct field_modify_info modify_tcp[] = {
 				     struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t idx = handle->dvh.encap_decap;
+	uint32_t idx = handle->dvh.rix_encap_decap;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 
 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
@@ -8302,7 +8304,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_tbl_data_entry *tbl_data;
 
 	tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
-			     handle->jump);
+			     handle->rix_jump);
 	if (!tbl_data)
 		return 0;
 	cache_resource = &tbl_data->jump;
@@ -8370,7 +8372,7 @@ struct field_modify_info modify_tcp[] = {
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource;
-	uint32_t idx = handle->port_id_action;
+	uint32_t idx = handle->rix_port_id_action;
 
 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
 					idx);
@@ -8410,7 +8412,7 @@ struct field_modify_info modify_tcp[] = {
 					  struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t idx = handle->dvh.push_vlan_res;
+	uint32_t idx = handle->dvh.rix_push_vlan;
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
 
 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
@@ -8447,19 +8449,19 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 			       struct mlx5_flow_handle *handle)
 {
-	if (!handle->fate_idx)
+	if (!handle->rix_fate)
 		return;
 	if (handle->fate_action == MLX5_FLOW_FATE_DROP)
 		mlx5_hrxq_drop_release(dev);
 	else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE)
-		mlx5_hrxq_release(dev, handle->hrxq);
+		mlx5_hrxq_release(dev, handle->rix_hrxq);
 	else if (handle->fate_action == MLX5_FLOW_FATE_JUMP)
 		flow_dv_jump_tbl_resource_release(dev, handle);
 	else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
 		flow_dv_port_id_action_resource_release(dev, handle);
 	else
 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
-	handle->fate_idx = 0;
+	handle->rix_fate = 0;
 }
 
 /**
@@ -8535,16 +8537,16 @@ struct field_modify_info modify_tcp[] = {
 		flow->dev_handles = dev_handle->next.next;
 		if (dev_handle->dvh.matcher)
 			flow_dv_matcher_release(dev, dev_handle);
-		if (dev_handle->dvh.encap_decap)
+		if (dev_handle->dvh.rix_encap_decap)
 			flow_dv_encap_decap_resource_release(dev, dev_handle);
 		if (dev_handle->dvh.modify_hdr)
 			flow_dv_modify_hdr_resource_release(dev_handle);
-		if (dev_handle->dvh.push_vlan_res)
+		if (dev_handle->dvh.rix_push_vlan)
 			flow_dv_push_vlan_action_resource_release(dev,
 								  dev_handle);
-		if (dev_handle->dvh.tag_resource)
+		if (dev_handle->dvh.rix_tag)
 			flow_dv_tag_release(dev,
-					    dev_handle->dvh.tag_resource);
+					    dev_handle->dvh.rix_tag);
 		flow_dv_fate_resource_release(dev, dev_handle);
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
 			   tmp_idx);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 1d56b03..f70b879 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1760,14 +1760,14 @@
 			handle->ib_flow = NULL;
 		}
 		/* hrxq is union, don't touch it only the flag is set. */
-		if (handle->hrxq) {
+		if (handle->rix_hrxq) {
 			if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
 				mlx5_hrxq_drop_release(dev);
-				handle->hrxq = 0;
+				handle->rix_hrxq = 0;
 			} else if (handle->fate_action ==
 				   MLX5_FLOW_FATE_QUEUE) {
-				mlx5_hrxq_release(dev, handle->hrxq);
-				handle->hrxq = 0;
+				mlx5_hrxq_release(dev, handle->rix_hrxq);
+				handle->rix_hrxq = 0;
 			}
 		}
 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
@@ -1872,7 +1872,7 @@
 					 "cannot get hash queue");
 				goto error;
 			}
-			handle->hrxq = hrxq_idx;
+			handle->rix_hrxq = hrxq_idx;
 		}
 		MLX5_ASSERT(hrxq);
 		handle->ib_flow = mlx5_glue->create_flow(hrxq->qp,
@@ -1901,14 +1901,14 @@
 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
 		       dev_handles, handle, next) {
 		/* hrxq is union, don't touch it only the flag is set. */
-		if (handle->hrxq) {
+		if (handle->rix_hrxq) {
 			if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
 				mlx5_hrxq_drop_release(dev);
-				handle->hrxq = 0;
+				handle->rix_hrxq = 0;
 			} else if (handle->fate_action ==
 				   MLX5_FLOW_FATE_QUEUE) {
-				mlx5_hrxq_release(dev, handle->hrxq);
-				handle->hrxq = 0;
+				mlx5_hrxq_release(dev, handle->rix_hrxq);
+				handle->rix_hrxq = 0;
 			}
 		}
 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
-- 
1.8.3.1


Thread overview: 12+ messages
     [not found] <https://patches.dpdk.org/cover/68470/>
2020-04-16  8:34 ` [dpdk-dev] [PATCH v2 00/10] net/mlx5: optimize flow structure Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 01/10] net/mlx5: reorganize fate actions as union Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 02/10] net/mlx5: optimize action flags in flow handle Suanming Mou
2020-04-16  8:34   ` Suanming Mou [this message]
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 04/10] net/mlx5: optimize flow meter handle type Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 05/10] net/mlx5: allocate meter from indexed pool Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 06/10] net/mlx5: convert mark copy resource to indexed Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 07/10] net/mlx5: optimize flow director filter memory Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 08/10] net/mlx5: optimize mlx5 flow RSS struct Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 09/10] net/mlx5: allocate rte flow from indexed pool Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 10/10] net/mlx5: reorganize rte flow structure Suanming Mou
2020-04-16 17:08   ` [dpdk-dev] [PATCH v2 00/10] net/mlx5: optimize " Raslan Darawsheh
