From: Suanming Mou
To: Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko
Cc: rasland@mellanox.com, dev@dpdk.org
Date: Mon, 13 Apr 2020 09:11:48 +0800
Message-Id: <1586740309-449310-10-git-send-email-suanmingm@mellanox.com>
In-Reply-To: <1586740309-449310-1-git-send-email-suanmingm@mellanox.com>
References: <1586740309-449310-1-git-send-email-suanmingm@mellanox.com>
Subject: [dpdk-dev] [PATCH 09/10] net/mlx5: convert hrxq to indexed

This commit converts hrxq to an indexed resource. Using a uint32_t index
instead of a pointer saves 4 bytes of memory in the flow handle. For
millions of flows, this saves several MBytes of memory.

Signed-off-by: Suanming Mou
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5.c            | 31 +++++++++++++++++--------
 drivers/net/mlx5/mlx5.h            |  3 ++-
 drivers/net/mlx5/mlx5_flow.h       |  2 +-
 drivers/net/mlx5/mlx5_flow_dv.c    | 46 ++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 24 +++++++++++--------
 drivers/net/mlx5/mlx5_rxq.c        | 47 ++++++++++++++++++++++-------------
 drivers/net/mlx5/mlx5_rxtx.h       | 22 +++++++++---------
 7 files changed, 104 insertions(+), 71 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 9a66ed5..b108c0e 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -250,6 +250,17 @@ struct mlx5_dev_spawn_data {
 		.free = rte_free,
 		.type = "mlx5_jump_ipool",
 	},
+	{
+		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 0,
+		.release_mem_en = 1,
+		.malloc = rte_malloc_socket,
+		.free = rte_free,
+		.type = "mlx5_hrxq_ipool",
+	},
 };
@@ -1386,16 +1397,6 @@ struct mlx5_flow_id_pool *
 		close(priv->nl_socket_rdma);
 	if (priv->vmwa_context)
 		mlx5_vlan_vmwa_exit(priv->vmwa_context);
-	if (priv->sh) {
-		/*
-		 * Free the shared context in last turn, because the cleanup
-		 * routines above may use some shared fields, like
-		 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing
-		 * ifindex if Netlink fails.
-		 */
-		mlx5_free_shared_ibctx(priv->sh);
-		priv->sh = NULL;
-	}
 	ret = mlx5_hrxq_verify(dev);
 	if (ret)
 		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -1424,6 +1425,16 @@ struct mlx5_flow_id_pool *
 	if (ret)
 		DRV_LOG(WARNING, "port %u some flows still remain",
 			dev->data->port_id);
+	if (priv->sh) {
+		/*
+		 * Free the shared context in last turn, because the cleanup
+		 * routines above may use some shared fields, like
+		 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing
+		 * ifindex if Netlink fails.
+		 */
+		mlx5_free_shared_ibctx(priv->sh);
+		priv->sh = NULL;
+	}
 	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
 		unsigned int c = 0;
 		uint16_t port_id;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 2a05c09..3a01c82 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -57,6 +57,7 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_TAG, /* Pool for tag resource. */
 	MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
 	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
+	MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
 	MLX5_IPOOL_MAX,
 };
@@ -534,7 +535,7 @@ struct mlx5_priv {
 	int flow_idx; /* Intermediate device flow index. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
-	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
+	uint32_t hrxqs; /* Verbs Hash Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
 	/* Indirection tables. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ce7e929..fcc887d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -509,7 +509,7 @@ struct mlx5_flow_handle {
 	uint64_t act_flags;
 	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
 	void *ib_flow; /**< Verbs flow pointer. */
-	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+	uint32_t hrxq; /**< Hash Rx queue object index. */
 	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
 	union {
 		uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 9fe0446..6489213 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8080,8 +8080,9 @@ struct field_modify_info modify_tcp[] = {
 		if (dv->transfer) {
 			dv->actions[n++] = priv->sh->esw_drop_action;
 		} else {
-			dh->hrxq = mlx5_hrxq_drop_new(dev);
-			if (!dh->hrxq) {
+			struct mlx5_hrxq *drop_hrxq;
+			drop_hrxq = mlx5_hrxq_drop_new(dev);
+			if (!drop_hrxq) {
 				rte_flow_error_set
 					(error, errno,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -8089,28 +8090,31 @@ struct field_modify_info modify_tcp[] = {
 					 "cannot get drop hash queue");
 				goto error;
 			}
-			dv->actions[n++] = dh->hrxq->action;
+			dv->actions[n++] = drop_hrxq->action;
 		}
 	} else if (dh->act_flags &
 		   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 		struct mlx5_hrxq *hrxq;
+		uint32_t hrxq_idx;
 
 		MLX5_ASSERT(flow->rss.queue);
-		hrxq = mlx5_hrxq_get(dev, flow->rss.key,
-				     MLX5_RSS_HASH_KEY_LEN,
-				     dev_flow->hash_fields,
-				     (*flow->rss.queue),
-				     flow->rss.queue_num);
-		if (!hrxq) {
-			hrxq = mlx5_hrxq_new
-				(dev, flow->rss.key,
-				 MLX5_RSS_HASH_KEY_LEN,
-				 dev_flow->hash_fields,
-				 (*flow->rss.queue),
-				 flow->rss.queue_num,
-				 !!(dh->layers &
-				    MLX5_FLOW_LAYER_TUNNEL));
+		hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
+					 MLX5_RSS_HASH_KEY_LEN,
+					 dev_flow->hash_fields,
+					 (*flow->rss.queue),
+					 flow->rss.queue_num);
+		if (!hrxq_idx) {
+			hrxq_idx = mlx5_hrxq_new
+				(dev, flow->rss.key,
+				 MLX5_RSS_HASH_KEY_LEN,
+				 dev_flow->hash_fields,
+				 (*flow->rss.queue),
+				 flow->rss.queue_num,
+				 !!(dh->layers &
+				    MLX5_FLOW_LAYER_TUNNEL));
 		}
+		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+				      hrxq_idx);
 		if (!hrxq) {
 			rte_flow_error_set
 				(error, rte_errno,
@@ -8118,8 +8122,8 @@ struct field_modify_info modify_tcp[] = {
 				 "cannot get hash queue");
 			goto error;
 		}
-		dh->hrxq = hrxq;
-		dv->actions[n++] = dh->hrxq->action;
+		dh->hrxq = hrxq_idx;
+		dv->actions[n++] = hrxq->action;
 	}
 	dh->ib_flow =
 		mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
@@ -8152,7 +8156,7 @@ struct field_modify_info modify_tcp[] = {
 				mlx5_hrxq_drop_release(dev);
 			else
 				mlx5_hrxq_release(dev, dh->hrxq);
-			dh->hrxq = NULL;
+			dh->hrxq = 0;
 		}
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
@@ -8416,7 +8420,7 @@ struct field_modify_info modify_tcp[] = {
 				mlx5_hrxq_drop_release(dev);
 			else
 				mlx5_hrxq_release(dev, dh->hrxq);
-			dh->hrxq = NULL;
+			dh->hrxq = 0;
 		}
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index ccd3395..5f4b701 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1656,7 +1656,7 @@
 			mlx5_hrxq_drop_release(dev);
 		else
 			mlx5_hrxq_release(dev, handle->hrxq);
-		handle->hrxq = NULL;
+		handle->hrxq = 0;
 	}
 	if (handle->vf_vlan.tag && handle->vf_vlan.created)
 		mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
@@ -1710,6 +1710,7 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_handle *handle;
 	struct mlx5_flow *dev_flow;
+	struct mlx5_hrxq *hrxq;
 	int err;
 	int idx;
@@ -1717,8 +1718,8 @@
 		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
 		handle = dev_flow->handle;
 		if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
-			handle->hrxq = mlx5_hrxq_drop_new(dev);
-			if (!handle->hrxq) {
+			hrxq = mlx5_hrxq_drop_new(dev);
+			if (!hrxq) {
 				rte_flow_error_set
 					(error, errno,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1726,22 +1727,24 @@
 				goto error;
 			}
 		} else {
-			struct mlx5_hrxq *hrxq;
+			uint32_t hrxq_idx;
 
 			MLX5_ASSERT(flow->rss.queue);
-			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
+			hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
 					     dev_flow->hash_fields,
 					     (*flow->rss.queue),
 					     flow->rss.queue_num);
-			if (!hrxq)
-				hrxq = mlx5_hrxq_new(dev, flow->rss.key,
+			if (!hrxq_idx)
+				hrxq_idx = mlx5_hrxq_new(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
 					     dev_flow->hash_fields,
 					     (*flow->rss.queue),
 					     flow->rss.queue_num,
 					     !!(handle->layers &
 						MLX5_FLOW_LAYER_TUNNEL));
+			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+					      hrxq_idx);
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -1749,9 +1752,10 @@
 					 "cannot get hash queue");
 				goto error;
 			}
-			handle->hrxq = hrxq;
+			handle->hrxq = hrxq_idx;
 		}
-		handle->ib_flow = mlx5_glue->create_flow(handle->hrxq->qp,
+		MLX5_ASSERT(hrxq);
+		handle->ib_flow = mlx5_glue->create_flow(hrxq->qp,
 							 &dev_flow->verbs.attr);
 		if (!handle->ib_flow) {
 			rte_flow_error_set(error, errno,
@@ -1780,7 +1784,7 @@
 			mlx5_hrxq_drop_release(dev);
 		else
 			mlx5_hrxq_release(dev, handle->hrxq);
-		handle->hrxq = NULL;
+		handle->hrxq = 0;
 	}
 	if (handle->vf_vlan.tag && handle->vf_vlan.created)
 		mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 0a95e3c..8203025 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2381,9 +2381,9 @@ enum mlx5_rxq_type
  *   Tunnel type.
  *
  * @return
- *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
+ *   The initialised Verbs/DevX object index, 0 otherwise and rte_errno is set.
  */
-struct mlx5_hrxq *
+uint32_t
 mlx5_hrxq_new(struct rte_eth_dev *dev,
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
@@ -2392,6 +2392,7 @@ struct mlx5_hrxq *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
+	uint32_t hrxq_idx = 0;
 	struct ibv_qp *qp = NULL;
 	struct mlx5_ind_table_obj *ind_tbl;
 	int err;
@@ -2411,7 +2412,7 @@ struct mlx5_hrxq *
 	}
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
-		return NULL;
+		return 0;
 	}
 	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
@@ -2552,7 +2553,7 @@ struct mlx5_hrxq *
 			goto error;
 		}
 	}
-	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
 	if (!hrxq)
 		goto error;
 	hrxq->ind_table = ind_tbl;
@@ -2581,8 +2582,9 @@ struct mlx5_hrxq *
 	hrxq->hash_fields = hash_fields;
 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
 	rte_atomic32_inc(&hrxq->refcnt);
-	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
-	return hrxq;
+	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
+		     hrxq, next);
+	return hrxq_idx;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	mlx5_ind_table_obj_release(dev, ind_tbl);
@@ -2591,7 +2593,7 @@ struct mlx5_hrxq *
 	else if (tir)
 		claim_zero(mlx5_devx_cmd_destroy(tir));
 	rte_errno = err; /* Restore rte_errno. */
-	return NULL;
+	return 0;
 }
 
 /**
@@ -2608,9 +2610,9 @@ struct mlx5_hrxq *
  *   Number of queues.
  *
  * @return
- *   An hash Rx queue on success.
+ *   A hash Rx queue index on success.
  */
-struct mlx5_hrxq *
+uint32_t
 mlx5_hrxq_get(struct rte_eth_dev *dev,
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
@@ -2618,9 +2620,11 @@ struct mlx5_hrxq *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
+	uint32_t idx;
 
 	queues_n = hash_fields ? queues_n : 1;
-	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
+		      hrxq, next) {
 		struct mlx5_ind_table_obj *ind_tbl;
 
 		if (hrxq->rss_key_len != rss_key_len)
@@ -2637,9 +2641,9 @@ struct mlx5_hrxq *
 			continue;
 		}
 		rte_atomic32_inc(&hrxq->refcnt);
-		return hrxq;
+		return idx;
 	}
-	return NULL;
+	return 0;
 }
 
 /**
@@ -2648,14 +2652,20 @@ struct mlx5_hrxq *
  * @param dev
  *   Pointer to Ethernet device.
  * @param hrxq
- *   Pointer to Hash Rx queue to release.
+ *   Index to Hash Rx queue to release.
  *
 * @return
 *   1 while a reference on it exists, 0 when freed.
  */
 int
-mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq;
+
+	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	if (!hrxq)
+		return 0;
 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
@@ -2665,8 +2675,9 @@ struct mlx5_hrxq *
 		else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
 			claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
 		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
-		LIST_REMOVE(hrxq, next);
-		rte_free(hrxq);
+		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
+			     hrxq_idx, hrxq, next);
+		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 		return 0;
 	}
 	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
@@ -2687,9 +2698,11 @@ struct mlx5_hrxq *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
+	uint32_t idx;
 	int ret = 0;
 
-	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
+		      hrxq, next) {
 		DRV_LOG(DEBUG,
 			"port %u hash Rx queue %p still referenced",
 			dev->data->port_id, (void *)hrxq);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 939778a..b2d944b 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -231,7 +231,7 @@ struct mlx5_ind_table_obj {
 
 /* Hash Rx queue. */
 struct mlx5_hrxq {
-	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
+	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	RTE_STD_C11
@@ -406,16 +406,16 @@ struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 int mlx5_rxq_verify(struct rte_eth_dev *dev);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
 int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
-struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
-				const uint8_t *rss_key, uint32_t rss_key_len,
-				uint64_t hash_fields,
-				const uint16_t *queues, uint32_t queues_n,
-				int tunnel __rte_unused);
-struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
-				const uint8_t *rss_key, uint32_t rss_key_len,
-				uint64_t hash_fields,
-				const uint16_t *queues, uint32_t queues_n);
-int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
+uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
+		       const uint8_t *rss_key, uint32_t rss_key_len,
+		       uint64_t hash_fields,
+		       const uint16_t *queues, uint32_t queues_n,
+		       int tunnel __rte_unused);
+uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+		       const uint8_t *rss_key, uint32_t rss_key_len,
+		       uint64_t hash_fields,
+		       const uint16_t *queues, uint32_t queues_n);
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx);
 int mlx5_hrxq_verify(struct rte_eth_dev *dev);
 enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
-- 
1.8.3.1
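
As background for this conversion: the sketch below is a minimal,
self-contained illustration of the index-based pool pattern, not the mlx5
implementation. All toy_* names are invented for illustration; the real
trunk-based pool and the ILIST_* macros live in
drivers/net/mlx5/mlx5_utils.h. It demonstrates the two properties the
commit message relies on: a 4-byte uint32_t handle instead of an 8-byte
pointer on 64-bit systems, and index 0 serving as the NULL sentinel, so
the old "!hrxq" checks translate directly to "!hrxq_idx".

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* One pooled object; 'next' links objects by index, the role
	 * ILIST_ENTRY plays in the patch.
	 */
	struct toy_entry {
		uint32_t next;    /* index of the next element, 0 ends the list */
		uint32_t payload;
	};

	/* A single-trunk pool; the real ipool grows by chained trunks. */
	struct toy_pool {
		struct toy_entry *trunk;
		uint32_t used;
		uint32_t cap;
	};

	/* Allocate one zeroed entry, return its 1-based index, 0 on failure. */
	static uint32_t
	toy_alloc(struct toy_pool *p)
	{
		if (p->used == p->cap) {
			uint32_t cap = p->cap ? 2 * p->cap : 64;
			struct toy_entry *t = realloc(p->trunk, cap * sizeof(*t));

			if (t == NULL)
				return 0; /* 0 doubles as the "no object" value */
			p->trunk = t;
			p->cap = cap;
		}
		memset(&p->trunk[p->used], 0, sizeof(p->trunk[0]));
		return ++p->used; /* 1-based, so 0 stays free for NULL */
	}

	/* Map an index back to a pointer, the job mlx5_ipool_get() performs. */
	static struct toy_entry *
	toy_get(struct toy_pool *p, uint32_t idx)
	{
		return (idx != 0 && idx <= p->used) ? &p->trunk[idx - 1] : NULL;
	}

	int
	main(void)
	{
		struct toy_pool pool = { NULL, 0, 0 };
		uint32_t head = 0; /* list head held as a 4-byte index */
		uint32_t idx = toy_alloc(&pool);
		struct toy_entry *e = toy_get(&pool, idx);

		if (e == NULL)
			return 1;
		/* Head insertion by index, as ILIST_INSERT does in the patch. */
		e->next = head;
		head = idx;
		printf("handle size: %zu bytes as index vs %zu as pointer\n",
		       sizeof(uint32_t), sizeof(void *));
		free(pool.trunk);
		return 0;
	}

A side benefit of handles-as-indexes: if the pool's backing memory moves
when it grows (as with the realloc above), stored indexes remain valid
where stored pointers would dangle, on top of halving the handle size.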