From: Suanming Mou <suanmingm@mellanox.com>
To: Matan Azrad <matan@mellanox.com>,
Shahaf Shuler <shahafs@mellanox.com>,
Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Cc: rasland@mellanox.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH 04/10] net/mlx5: convert encap/decap resource to indexed
Date: Mon, 13 Apr 2020 09:11:43 +0800
Message-ID: <1586740309-449310-5-git-send-email-suanmingm@mellanox.com>
In-Reply-To: <1586740309-449310-1-git-send-email-suanmingm@mellanox.com>

This commit converts the flow encap/decap resource to indexed.
Using a uint32_t index instead of a pointer saves 4 bytes of memory in
the flow handle. For millions of flows, this will save several MBytes
of memory.
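
For illustration only (a minimal sketch, not part of the patch; the
"before"/"after" struct names below are hypothetical), the conversion
keeps a pool index in the handle and resolves it back to the resource
pointer with mlx5_ipool_get() only where the pointer is actually needed:

    /* Before: the handle embeds an 8-byte pointer (on 64-bit hosts). */
    struct handle_before {
            struct mlx5_flow_dv_encap_decap_resource *encap_decap;
    };

    /* After: the handle stores a 4-byte index into the shared indexed pool. */
    struct handle_after {
            uint32_t encap_decap; /* index into sh->ipool[MLX5_IPOOL_DECAP_ENCAP] */
    };

    /* Recover the resource pointer on demand from the index. */
    struct mlx5_flow_dv_encap_decap_resource *res =
            mlx5_ipool_get(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
                           handle->dvh.encap_decap);
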
Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
drivers/net/mlx5/mlx5.c | 47 ++++++++++++++++++++++++++++++++++++++++
drivers/net/mlx5/mlx5.h | 9 +++++++-
drivers/net/mlx5/mlx5_flow.h | 8 ++++---
drivers/net/mlx5/mlx5_flow_dv.c | 48 +++++++++++++++++++++++++++--------------
4 files changed, 92 insertions(+), 20 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 6a11b14..f4c2c14 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -194,6 +194,21 @@ struct mlx5_dev_spawn_data {
static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
+ {
+ .size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 0,
+ .release_mem_en = 1,
+ .malloc = rte_malloc_socket,
+ .free = rte_free,
+ .type = "mlx5_encap_decap_ipool",
+ },
+};
+
+
#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16
@@ -411,6 +426,36 @@ struct mlx5_flow_id_pool *
}
/**
+ * Initialize the flow resources' indexed mempool.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_ibv_shared object.
+ */
+static void
+mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh)
+{
+ uint8_t i;
+
+ for (i = 0; i < MLX5_IPOOL_MAX; ++i)
+ sh->ipool[i] = mlx5_ipool_create(&mlx5_ipool_cfg[i]);
+}
+
+/**
+ * Release the flow resources' indexed mempool.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_ibv_shared object.
+ */
+static void
+mlx5_flow_ipool_destroy(struct mlx5_ibv_shared *sh)
+{
+ uint8_t i;
+
+ for (i = 0; i < MLX5_IPOOL_MAX; ++i)
+ mlx5_ipool_destroy(sh->ipool[i]);
+}
+
+/**
* Extract pdn of PD object using DV API.
*
* @param[in] pd
@@ -625,6 +670,7 @@ struct mlx5_flow_id_pool *
goto error;
}
mlx5_flow_counters_mng_init(sh);
+ mlx5_flow_ipool_create(sh);
/* Add device to memory callback list. */
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
@@ -697,6 +743,7 @@ struct mlx5_flow_id_pool *
* Only primary process handles async device events.
**/
mlx5_flow_counters_mng_close(sh);
+ mlx5_flow_ipool_destroy(sh);
MLX5_ASSERT(!sh->intr_cnt);
if (sh->intr_cnt)
mlx5_intr_callback_unregister
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 34ab475..a1a7294 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -51,6 +51,11 @@ enum mlx5_mp_req_type {
MLX5_MP_REQ_QUEUE_STATE_MODIFY,
};
+enum mlx5_ipool_index {
+ MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
+ MLX5_IPOOL_MAX,
+};
+
struct mlx5_mp_arg_queue_state_modify {
uint8_t is_wq; /* Set if WQ. */
uint16_t queue_id; /* DPDK queue ID. */
@@ -446,7 +451,7 @@ struct mlx5_ibv_shared {
/* Direct Rules tables for FDB, NIC TX+RX */
void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
- LIST_HEAD(encap_decap, mlx5_flow_dv_encap_decap_resource) encaps_decaps;
+ uint32_t encaps_decaps; /* Encap/decap action indexed memory list. */
LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds;
struct mlx5_hlist *tag_table;
LIST_HEAD(port_id_action_list, mlx5_flow_dv_port_id_action_resource)
@@ -454,6 +459,8 @@ struct mlx5_ibv_shared {
LIST_HEAD(push_vlan_action_list, mlx5_flow_dv_push_vlan_action_resource)
push_vlan_action_list; /* List of push VLAN actions. */
struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
+ struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
+ /* Memory Pool for mlx5 flow resources. */
/* Shared interrupt handler section. */
pthread_mutex_t intr_mutex; /* Interrupt config mutex. */
uint32_t intr_cnt; /* Interrupt handler reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 0f0e59d..77d6b2d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -364,7 +364,7 @@ struct mlx5_flow_dv_matcher {
/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
- LIST_ENTRY(mlx5_flow_dv_encap_decap_resource) next;
+ ILIST_ENTRY(uint32_t)next;
/* Pointer to next element. */
rte_atomic32_t refcnt; /**< Reference counter. */
void *verbs_action;
@@ -482,8 +482,8 @@ struct mlx5_flow_rss {
struct mlx5_flow_handle_dv {
/* Flow DV api: */
struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
- struct mlx5_flow_dv_encap_decap_resource *encap_decap;
- /**< Pointer to encap/decap resource in cache. */
+ uint32_t encap_decap;
+ /**< Index to encap/decap resource in cache. */
struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
/**< Pointer to modify header resource in cache. */
struct mlx5_flow_dv_jump_tbl_resource *jump;
@@ -543,6 +543,8 @@ struct mlx5_flow_dv_workspace {
uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
int actions_n; /**< number of actions. */
void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
+ struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+ /**< Pointer to encap/decap resource in cache. */
struct mlx5_flow_dv_match_params value;
/**< Holds the value that the packet is compared to. */
};
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f5d98d2..8275098 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2437,6 +2437,7 @@ struct field_modify_info modify_tcp[] = {
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
+ uint32_t idx = 0;
resource->flags = dev_flow->dv.group ? 0 : 1;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
@@ -2446,7 +2447,8 @@ struct field_modify_info modify_tcp[] = {
else
domain = sh->tx_domain;
/* Lookup a matching resource from cache. */
- LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
+ cache_resource, next) {
if (resource->reformat_type == cache_resource->reformat_type &&
resource->ft_type == cache_resource->ft_type &&
resource->flags == cache_resource->flags &&
@@ -2458,12 +2460,14 @@ struct field_modify_info modify_tcp[] = {
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = idx;
+ dev_flow->dv.encap_decap = cache_resource;
return 0;
}
}
/* Register new encap/decap resource. */
- cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ &dev_flow->handle->dvh.encap_decap);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -2483,8 +2487,9 @@ struct field_modify_info modify_tcp[] = {
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
- dev_flow->handle->dvh.encap_decap = cache_resource;
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
+ dev_flow->handle->dvh.encap_decap, cache_resource, next);
+ dev_flow->dv.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
@@ -3080,6 +3085,7 @@ struct field_modify_info modify_tcp[] = {
const struct rte_flow_action_raw_encap *encap_data;
struct mlx5_flow_dv_encap_decap_resource res;
+ memset(&res, 0, sizeof(res));
encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
res.size = encap_data->size;
memcpy(res.buf, encap_data->data, res.size);
@@ -7572,7 +7578,7 @@ struct field_modify_info modify_tcp[] = {
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -7582,7 +7588,7 @@ struct field_modify_info modify_tcp[] = {
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
@@ -7592,7 +7598,7 @@ struct field_modify_info modify_tcp[] = {
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
@@ -7600,7 +7606,7 @@ struct field_modify_info modify_tcp[] = {
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
@@ -7612,7 +7618,7 @@ struct field_modify_info modify_tcp[] = {
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
@@ -8166,6 +8172,8 @@ struct field_modify_info modify_tcp[] = {
/**
* Release an encap/decap resource.
*
+ * @param dev
+ * Pointer to Ethernet device.
* @param handle
* Pointer to mlx5_flow_handle.
*
@@ -8173,11 +8181,17 @@ struct field_modify_info modify_tcp[] = {
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
+flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
{
- struct mlx5_flow_dv_encap_decap_resource *cache_resource =
- handle->dvh.encap_decap;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t idx = handle->dvh.encap_decap;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ idx);
+ if (!cache_resource)
+ return 0;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
(void *)cache_resource,
@@ -8185,8 +8199,10 @@ struct field_modify_info modify_tcp[] = {
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
claim_zero(mlx5_glue->destroy_flow_action
(cache_resource->verbs_action));
- LIST_REMOVE(cache_resource, next);
- rte_free(cache_resource);
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ &priv->sh->encaps_decaps, idx,
+ cache_resource, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
DRV_LOG(DEBUG, "encap/decap resource %p: removed",
(void *)cache_resource);
return 0;
@@ -8388,7 +8404,7 @@ struct field_modify_info modify_tcp[] = {
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
if (dev_handle->dvh.encap_decap)
- flow_dv_encap_decap_resource_release(dev_handle);
+ flow_dv_encap_decap_resource_release(dev, dev_handle);
if (dev_handle->dvh.modify_hdr)
flow_dv_modify_hdr_resource_release(dev_handle);
if (dev_handle->dvh.jump)
--
1.8.3.1