From: Suanming Mou <suanmingm@mellanox.com>
To: viacheslavo@mellanox.com, matan@mellanox.com
Cc: rasland@mellanox.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 10/10] net/mlx5: convert flow dev handle to indexed
Date: Thu, 16 Apr 2020 10:42:08 +0800 [thread overview]
Message-ID: <1587004928-328077-11-git-send-email-suanmingm@mellanox.com> (raw)
In-Reply-To: <1587004928-328077-1-git-send-email-suanmingm@mellanox.com>
This commit converts flow dev handle to indexed.
Changing the mlx5 flow handle from a pointer to a uint32_t index saves memory for
each flow. With millions of flows, it saves several MBytes of memory.
Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
drivers/net/mlx5/mlx5.c | 30 ++++++++++++++++++++++++++++--
drivers/net/mlx5/mlx5.h | 3 +++
drivers/net/mlx5/mlx5_flow.c | 23 +++++++++++++++++------
drivers/net/mlx5/mlx5_flow.h | 7 ++++---
drivers/net/mlx5/mlx5_flow_dv.c | 35 +++++++++++++++++++++++++++--------
drivers/net/mlx5/mlx5_flow_verbs.c | 29 ++++++++++++++++++++++-------
6 files changed, 101 insertions(+), 26 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index dda5f72..331318a 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -199,6 +199,7 @@ struct mlx5_dev_spawn_data {
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
{
.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
.trunk_size = 64,
@@ -254,6 +255,7 @@ struct mlx5_dev_spawn_data {
.free = rte_free,
.type = "mlx5_jump_ipool",
},
+#endif
{
.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
.trunk_size = 64,
@@ -265,6 +267,17 @@ struct mlx5_dev_spawn_data {
.free = rte_free,
.type = "mlx5_jump_ipool",
},
+ {
+ .size = sizeof(struct mlx5_flow_handle),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 0,
+ .release_mem_en = 1,
+ .malloc = rte_malloc_socket,
+ .free = rte_free,
+ .type = "mlx5_flow_handle_ipool",
+ },
};
@@ -491,12 +504,25 @@ struct mlx5_flow_id_pool *
*
* @param[in] sh
* Pointer to mlx5_ibv_shared object.
+ * @param[in] config
+ * Pointer to user dev config.
*/
static void
-mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh)
+mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh,
+ const struct mlx5_dev_config *config __rte_unused)
{
uint8_t i;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ /*
+ * While DV is supported, if the user chooses the verbs
+ * mode, the mlx5 flow handle size differs from the default:
+ * use the smaller MLX5_FLOW_HANDLE_VERBS_SIZE instead.
+ */
+ if (!config->dv_flow_en)
+ mlx5_ipool_cfg[MLX5_IPOOL_MLX5_FLOW].size =
+ MLX5_FLOW_HANDLE_VERBS_SIZE;
+#endif
for (i = 0; i < MLX5_IPOOL_MAX; ++i)
sh->ipool[i] = mlx5_ipool_create(&mlx5_ipool_cfg[i]);
}
@@ -731,7 +757,7 @@ struct mlx5_flow_id_pool *
goto error;
}
mlx5_flow_counters_mng_init(sh);
- mlx5_flow_ipool_create(sh);
+ mlx5_flow_ipool_create(sh, config);
/* Add device to memory callback list. */
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 37ff4a0..2c4e823 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -45,12 +45,15 @@
enum mlx5_ipool_index {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
MLX5_IPOOL_PUSH_VLAN, /* Pool for push vlan resource. */
MLX5_IPOOL_TAG, /* Pool for tag resource. */
MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
MLX5_IPOOL_JUMP, /* Pool for jump resource. */
+#endif
MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
+ MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
MLX5_IPOOL_MAX,
};
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c44bc1f..bf95a40 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -775,9 +775,12 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
flow_drv_rxq_flags_set(dev, flow, dev_handle);
}
@@ -847,9 +850,12 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
flow_drv_rxq_flags_trim(dev, flow, dev_handle);
}
@@ -2313,9 +2319,12 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
if (dev_handle->qrss_id)
flow_qrss_free_id(dev, dev_handle->qrss_id);
}
@@ -3459,7 +3468,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
dev_flow->flow = flow;
dev_flow->external = external;
/* Subflow object was created, we must include one in the list. */
- LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
/*
* If dev_flow is as one of the suffix flow, some actions in suffix
* flow may need some user defined item layer flags.
@@ -4264,7 +4274,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
}
- LIST_INIT(&flow->dev_handles);
+ flow->dev_handles = 0;
if (rss && rss->types) {
unsigned int graph_root;
@@ -4312,7 +4322,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
goto error;
dev_flow->flow = flow;
dev_flow->external = 0;
- LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
ret = flow_drv_translate(dev, dev_flow, &attr_tx,
items_tx.items,
actions_hairpin_tx.actions, error);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 26f8704..a2ea122 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -502,8 +502,8 @@ struct mlx5_flow_handle_dv {
/** Device flow handle structure: used both for creating & destroying. */
struct mlx5_flow_handle {
- LIST_ENTRY(mlx5_flow_handle) next;
- /**< Pointer to next device flow handle. */
+ SILIST_ENTRY(uint32_t) next;
+ /**< Index to next device flow handle. */
uint64_t layers;
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
uint64_t act_flags;
@@ -632,6 +632,7 @@ struct mlx5_flow {
struct mlx5_flow_verbs_workspace verbs;
};
struct mlx5_flow_handle *handle;
+ uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
};
/* Flow meter state. */
@@ -747,7 +748,7 @@ struct rte_flow {
struct mlx5_flow_mreg_copy_resource *mreg_copy;
/**< pointer to metadata register copy table resource. */
struct mlx5_flow_meter *meter; /**< Holds flow meter. */
- LIST_HEAD(dev_handles, mlx5_flow_handle) dev_handles;
+ uint32_t dev_handles;
/**< Device flow handles that are part of the flow. */
struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5e6143b..f001b34 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -5333,7 +5333,7 @@ struct field_modify_info modify_tcp[] = {
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
{
- size_t size = sizeof(struct mlx5_flow_handle);
+ uint32_t handle_idx = 0;
struct mlx5_flow *dev_flow;
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
@@ -5345,7 +5345,8 @@ struct field_modify_info modify_tcp[] = {
"not free temporary device flow");
return NULL;
}
- dev_handle = rte_calloc(__func__, 1, size, 0);
+ dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ &handle_idx);
if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -5355,6 +5356,7 @@ struct field_modify_info modify_tcp[] = {
/* No multi-thread supporting. */
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
dev_flow->handle = dev_handle;
+ dev_flow->handle_idx = handle_idx;
dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
/*
* The matching value needs to be cleared to 0 before using. In the
@@ -8088,6 +8090,7 @@ struct field_modify_info modify_tcp[] = {
struct mlx5_flow_handle_dv *dv_h;
struct mlx5_flow *dev_flow;
struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
int n;
int err;
int idx;
@@ -8172,7 +8175,8 @@ struct field_modify_info modify_tcp[] = {
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(dh, &flow->dev_handles, next) {
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dh, next) {
if (dh->hrxq) {
if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
@@ -8429,10 +8433,17 @@ struct field_modify_info modify_tcp[] = {
__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow_handle *dh;
+ uint32_t handle_idx;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (!flow)
return;
- LIST_FOREACH(dh, &flow->dev_handles, next) {
+ handle_idx = flow->dev_handles;
+ while (handle_idx) {
+ dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ handle_idx);
+ if (!dh)
+ return;
if (dh->ib_flow) {
claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
dh->ib_flow = NULL;
@@ -8446,6 +8457,7 @@ struct field_modify_info modify_tcp[] = {
}
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+ handle_idx = dh->next.next;
}
}
@@ -8462,6 +8474,7 @@ struct field_modify_info modify_tcp[] = {
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (!flow)
return;
@@ -8474,9 +8487,14 @@ struct field_modify_info modify_tcp[] = {
mlx5_flow_meter_detach(flow->meter);
flow->meter = NULL;
}
- while (!LIST_EMPTY(&flow->dev_handles)) {
- dev_handle = LIST_FIRST(&flow->dev_handles);
- LIST_REMOVE(dev_handle, next);
+ while (flow->dev_handles) {
+ uint32_t tmp_idx = flow->dev_handles;
+
+ dev_handle = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
+ if (!dev_handle)
+ return;
+ flow->dev_handles = dev_handle->next.next;
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
if (dev_handle->dvh.encap_decap)
@@ -8494,7 +8512,8 @@ struct field_modify_info modify_tcp[] = {
if (dev_handle->dvh.tag_resource)
flow_dv_tag_release(dev,
dev_handle->dvh.tag_resource);
- rte_free(dev_handle);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
}
}
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index aa55f4e..9525fd4 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1505,6 +1505,7 @@
struct rte_flow_error *error)
{
size_t size = 0;
+ uint32_t handle_idx = 0;
struct mlx5_flow *dev_flow;
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1524,7 +1525,8 @@
"not free temporary device flow");
return NULL;
}
- dev_handle = rte_calloc(__func__, 1, MLX5_FLOW_HANDLE_VERBS_SIZE, 0);
+ dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ &handle_idx);
if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1534,6 +1536,7 @@
/* No multi-thread supporting. */
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
dev_flow->handle = dev_handle;
+ dev_flow->handle_idx = handle_idx;
/* Memcpy is used, only size needs to be cleared to 0. */
dev_flow->verbs.size = 0;
dev_flow->verbs.attr.num_of_specs = 0;
@@ -1739,11 +1742,14 @@
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle;
+ uint32_t handle_idx;
if (!flow)
return;
- LIST_FOREACH(handle, &flow->dev_handles, next) {
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, handle, next) {
if (handle->ib_flow) {
claim_zero(mlx5_glue->destroy_flow(handle->ib_flow));
handle->ib_flow = NULL;
@@ -1771,15 +1777,22 @@
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle;
if (!flow)
return;
flow_verbs_remove(dev, flow);
- while (!LIST_EMPTY(&flow->dev_handles)) {
- handle = LIST_FIRST(&flow->dev_handles);
- LIST_REMOVE(handle, next);
- rte_free(handle);
+ while (flow->dev_handles) {
+ uint32_t tmp_idx = flow->dev_handles;
+
+ handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
+ if (!handle)
+ return;
+ flow->dev_handles = handle->next.next;
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
}
if (flow->counter) {
flow_verbs_counter_release(dev, flow->counter);
@@ -1808,6 +1821,7 @@
struct mlx5_flow_handle *handle;
struct mlx5_flow *dev_flow;
struct mlx5_hrxq *hrxq;
+ uint32_t dev_handles;
int err;
int idx;
@@ -1875,7 +1889,8 @@
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(handle, &flow->dev_handles, next) {
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ dev_handles, handle, next) {
if (handle->hrxq) {
if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
--
1.8.3.1
next prev parent reply other threads:[~2020-04-16 2:43 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-04-13 1:11 [dpdk-dev] [PATCH 00/10] net/mlx5: optimize flow resource allocation Suanming Mou
2020-04-13 1:11 ` [dpdk-dev] [PATCH 01/10] net/mlx5: add indexed memory pool Suanming Mou
2020-04-13 1:11 ` [dpdk-dev] [PATCH 02/10] net/mlx5: add trunk dynamic grow for indexed pool Suanming Mou
2020-04-13 1:11 ` [dpdk-dev] [PATCH 03/10] net/mlx5: add trunk release " Suanming Mou
2020-04-13 1:11 ` [dpdk-dev] [PATCH 04/10] net/mlx5: convert encap/decap resource to indexed Suanming Mou
2020-04-13 1:11 ` [dpdk-dev] [PATCH 05/10] net/mlx5: convert push VLAN " Suanming Mou
2020-04-13 1:11 ` [dpdk-dev] [PATCH 06/10] net/mlx5: convert tag " Suanming Mou
2020-04-13 1:11 ` [dpdk-dev] [PATCH 07/10] net/mlx5: convert port id action " Suanming Mou
2020-04-13 1:11 ` [dpdk-dev] [PATCH 08/10] net/mlx5: convert jump resource " Suanming Mou
2020-04-13 1:11 ` [dpdk-dev] [PATCH 09/10] net/mlx5: convert hrxq " Suanming Mou
2020-04-16 2:41 ` [dpdk-dev] [PATCH v2 00/10] net/mlx5: optimize flow resource allocation Suanming Mou
2020-04-16 2:41 ` [dpdk-dev] [PATCH v2 01/10] net/mlx5: add indexed memory pool Suanming Mou
2020-04-16 2:42 ` [dpdk-dev] [PATCH v2 02/10] net/mlx5: add trunk dynamic grow for indexed pool Suanming Mou
2020-04-16 2:42 ` [dpdk-dev] [PATCH v2 03/10] net/mlx5: add trunk release " Suanming Mou
2020-04-16 2:42 ` [dpdk-dev] [PATCH v2 04/10] net/mlx5: convert encap/decap resource to indexed Suanming Mou
2020-04-16 2:42 ` [dpdk-dev] [PATCH v2 05/10] net/mlx5: convert push VLAN " Suanming Mou
2020-04-16 2:42 ` [dpdk-dev] [PATCH v2 06/10] net/mlx5: convert tag " Suanming Mou
2020-04-16 2:42 ` [dpdk-dev] [PATCH v2 07/10] net/mlx5: convert port id action " Suanming Mou
2020-04-16 2:42 ` [dpdk-dev] [PATCH v2 08/10] net/mlx5: convert jump resource " Suanming Mou
2020-04-16 2:42 ` [dpdk-dev] [PATCH v2 09/10] net/mlx5: convert hrxq " Suanming Mou
2020-04-16 2:42 ` Suanming Mou [this message]
2020-04-16 15:08 ` [dpdk-dev] [PATCH v2 00/10] net/mlx5: optimize flow resource allocation Raslan Darawsheh
2020-04-17 14:58 ` Ferruh Yigit
2020-04-18 1:46 ` Suanming Mou
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1587004928-328077-11-git-send-email-suanmingm@mellanox.com \
--to=suanmingm@mellanox.com \
--cc=dev@dpdk.org \
--cc=matan@mellanox.com \
--cc=rasland@mellanox.com \
--cc=viacheslavo@mellanox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).