From mboxrd@z Thu Jan  1 00:00:00 1970
From: Suanming Mou <suanmingm@nvidia.com>
To: viacheslavo@nvidia.com, matan@nvidia.com
Cc: rasland@nvidia.com, dev@dpdk.org
Date: Tue, 6 Oct 2020 19:48:59 +0800
Message-Id: <1601984948-313027-17-git-send-email-suanmingm@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH 16/25] net/mlx5: make Rx queue thread safe

This commit applies the cache linked list to the Rx queue to make it
thread safe.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c   |   5 +
 drivers/net/mlx5/mlx5.c            |   1 +
 drivers/net/mlx5/mlx5.h            |  24 +++-
 drivers/net/mlx5/mlx5_flow.h       |  16 ---
 drivers/net/mlx5/mlx5_flow_dv.c    |  20 +---
 drivers/net/mlx5/mlx5_flow_verbs.c |  19 +--
 drivers/net/mlx5/mlx5_rxq.c        | 234 ++++++++++++++++++-------------------
 drivers/net/mlx5/mlx5_rxtx.h       |  20 ++--
 8 files changed, 164 insertions(+), 175 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 24cf348..db7b0de 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1335,6 +1335,10 @@
 		err = ENOTSUP;
 		goto error;
 	}
+	mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
+			     mlx5_hrxq_create_cb,
+			     mlx5_hrxq_match_cb,
+			     mlx5_hrxq_remove_cb);
 	/* Query availability of metadata reg_c's. */
 	err = mlx5_flow_discover_mreg_c(eth_dev);
 	if (err < 0) {
@@ -1381,6 +1385,7 @@
 		mlx5_vlan_vmwa_exit(priv->vmwa_context);
 	if (own_domain_id)
 		claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+	mlx5_cache_list_destroy(&priv->hrxqs);
 	mlx5_free(priv);
 	if (eth_dev != NULL)
 		eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 61e5e69..fc9c5a9 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1219,6 +1219,7 @@ struct mlx5_dev_ctx_shared *
 		close(priv->nl_socket_rdma);
 	if (priv->vmwa_context)
 		mlx5_vlan_vmwa_exit(priv->vmwa_context);
+	mlx5_cache_list_destroy(&priv->hrxqs);
 	ret = mlx5_hrxq_verify(dev);
 	if (ret)
 		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f11d783..97729a8 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -61,6 +61,13 @@ enum mlx5_reclaim_mem_mode {
 	MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
 };
 
+/* Hash list callback context */
+struct mlx5_flow_cb_ctx {
+	struct rte_eth_dev *dev;
+	struct rte_flow_error *error;
+	void *data;
+};
+
 /* Device attributes used in mlx5 PMD */
 struct mlx5_dev_attr {
 	uint64_t device_cap_flags_ex;
@@ -664,6 +671,18 @@ struct mlx5_proc_priv {
 
 /* MTR list. */
 TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter);
 
+/* RSS description. */
+struct mlx5_flow_rss_desc {
+	uint32_t level;
+	uint32_t queue_num; /**< Number of entries in @p queue. */
+	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t hash_fields; /* Verbs Hash fields. */
+	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+	uint32_t key_len; /**< RSS hash key len. */
+	uint32_t tunnel; /**< Queue in tunnel. */
+	uint16_t *queue; /**< Destination queues. */
+};
+
 #define MLX5_PROC_PRIV(port_id) \
 	((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
@@ -710,7 +729,7 @@
 
 /* Hash Rx queue. */
 struct mlx5_hrxq {
-	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
+	struct mlx5_cache_entry entry; /* Cache entry. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	RTE_STD_C11
@@ -723,6 +742,7 @@ struct mlx5_hrxq {
 #endif
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint32_t rss_key_len; /* Hash key length in bytes. */
+	uint32_t idx; /* Hash Rx queue index. */
 	uint8_t rss_key[]; /* Hash key. */
 };
 
@@ -788,7 +808,7 @@ struct mlx5_priv {
 	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
-	uint32_t hrxqs; /* Verbs Hash Rx queues. */
+	struct mlx5_cache_list hrxqs; /* Verbs Hash Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
 	/* Indirection tables. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 1fe0b30..6ec0e72 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -368,13 +368,6 @@ enum mlx5_flow_fate_type {
 	MLX5_FLOW_FATE_MAX,
 };
 
-/* Hash list callback context */
-struct mlx5_flow_cb_ctx {
-	struct rte_eth_dev *dev;
-	struct rte_flow_error *error;
-	void *data;
-};
-
 /* Matcher PRM representation */
 struct mlx5_flow_dv_match_params {
 	size_t size;
@@ -532,15 +525,6 @@ struct ibv_spec_header {
 	uint16_t size;
 };
 
-/* RSS description. */
-struct mlx5_flow_rss_desc {
-	uint32_t level;
-	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
-	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
-	uint16_t *queue; /**< Destination queues. */
-};
-
 /* PMD flow priority for tunnel */
 #define MLX5_TUNNEL_PRIO_GET(rss_desc) \
 	((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index b884d8c..5092130 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8981,21 +8981,11 @@ struct mlx5_hlist_entry *
 				[!!wks->flow_nested_idx];
 
 			MLX5_ASSERT(rss_desc->queue_num);
-			hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-						 MLX5_RSS_HASH_KEY_LEN,
-						 dev_flow->hash_fields,
-						 rss_desc->queue,
-						 rss_desc->queue_num);
-			if (!hrxq_idx) {
-				hrxq_idx = mlx5_hrxq_new
-						(dev, rss_desc->key,
-						 MLX5_RSS_HASH_KEY_LEN,
-						 dev_flow->hash_fields,
-						 rss_desc->queue,
-						 rss_desc->queue_num,
-						 !!(dh->layers &
-						 MLX5_FLOW_LAYER_TUNNEL));
-			}
+			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
+			rss_desc->hash_fields = dev_flow->hash_fields;
+			rss_desc->tunnel = !!(dh->layers &
+					      MLX5_FLOW_LAYER_TUNNEL);
+			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 					      hrxq_idx);
 			if (!hrxq) {
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index b649960..905da8a 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1984,20 +1984,11 @@
 				&wks->rss_desc[!!wks->flow_nested_idx];
 
 		MLX5_ASSERT(rss_desc->queue_num);
-		hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-					 MLX5_RSS_HASH_KEY_LEN,
-					 dev_flow->hash_fields,
-					 rss_desc->queue,
-					 rss_desc->queue_num);
-		if (!hrxq_idx)
-			hrxq_idx = mlx5_hrxq_new
-					(dev, rss_desc->key,
-					 MLX5_RSS_HASH_KEY_LEN,
-					 dev_flow->hash_fields,
-					 rss_desc->queue,
-					 rss_desc->queue_num,
-					 !!(handle->layers &
-					 MLX5_FLOW_LAYER_TUNNEL));
+		rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
+		rss_desc->hash_fields = dev_flow->hash_fields;
+		rss_desc->tunnel = !!(handle->layers &
+				      MLX5_FLOW_LAYER_TUNNEL);
+		hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
 		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 				      hrxq_idx);
 		if (!hrxq) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index c059e21..0c45ca0 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1853,156 +1853,164 @@ struct mlx5_ind_table_obj *
 }
 
 /**
- * Get an Rx Hash queue.
+ * Match an Rx Hash queue.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param rss_conf
- *   RSS configuration for the Rx hash queue.
- * @param queues
- *   Queues entering in hash queue. In case of empty hash_fields only the
- *   first queue index will be taken for the indirection table.
- * @param queues_n
- *   Number of queues.
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
+ * @param cb_ctx
+ *   Context of the callback function.
  *
  * @return
- *   An hash Rx queue index on success.
+ *   0 if match, non-zero if not match.
  */
-uint32_t
-mlx5_hrxq_get(struct rte_eth_dev *dev,
-	      const uint8_t *rss_key, uint32_t rss_key_len,
-	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n)
+int
+mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
+		   struct mlx5_cache_entry *entry,
+		   void *cb_ctx)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
-	uint32_t idx;
-
-	queues_n = hash_fields ? queues_n : 1;
-	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
-		      hrxq, next) {
-		struct mlx5_ind_table_obj *ind_tbl;
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+	struct mlx5_ind_table_obj *ind_tbl;
+	uint32_t queues_n;
 
-		if (hrxq->rss_key_len != rss_key_len)
-			continue;
-		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
-			continue;
-		if (hrxq->hash_fields != hash_fields)
-			continue;
-		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
-		if (!ind_tbl)
-			continue;
-		if (ind_tbl != hrxq->ind_table) {
-			mlx5_ind_table_obj_release(dev, ind_tbl);
-			continue;
-		}
-		rte_atomic32_inc(&hrxq->refcnt);
-		return idx;
-	}
-	return 0;
+	if (hrxq->rss_key_len != rss_desc->key_len ||
+	    memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+	    hrxq->hash_fields != rss_desc->hash_fields)
+		return 1;
+	queues_n = rss_desc->hash_fields ? rss_desc->queue_num : 1;
+	ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue, queues_n);
+	if (ind_tbl)
+		mlx5_ind_table_obj_release(dev, ind_tbl);
+	return ind_tbl != hrxq->ind_table;
 }
 
 /**
- * Release the hash Rx queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param hrxq
- *   Index to Hash Rx queue to release.
+ * Remove the Rx Hash queue.
  *
- * @return
- *   1 while a reference on it exists, 0 when freed.
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
  */
-int
-mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+void
+mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
+		    struct mlx5_cache_entry *entry)
 {
+	struct rte_eth_dev *dev = list->ctx;
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
+	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
 
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-	if (!hrxq)
-		return 0;
-	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		mlx5_glue->destroy_flow_action(hrxq->action);
+	mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
-		priv->obj_ops.hrxq_destroy(hrxq);
-		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
-			     hrxq_idx, hrxq, next);
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-		return 0;
-	}
-	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
-	return 1;
+	priv->obj_ops.hrxq_destroy(hrxq);
+	mlx5_ind_table_obj_release(dev, hrxq->ind_table);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
 }
 
 /**
  * Create an Rx Hash queue.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param rss_key
- *   RSS key for the Rx hash queue.
- * @param rss_key_len
- *   RSS key length.
- * @param hash_fields
- *   Verbs protocol hash field to make the RSS on.
- * @param queues
- *   Queues entering in hash queue. In case of empty hash_fields only the
- *   first queue index will be taken for the indirection table.
- * @param queues_n
- *   Number of queues.
- * @param tunnel
- *   Tunnel type.
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
+ * @param cb_ctx
+ *   Context of the callback function.
  *
  * @return
- *   The DevX object initialized index, 0 otherwise and rte_errno is set.
+ *   Queue entry on success, NULL otherwise.
  */
-uint32_t
-mlx5_hrxq_new(struct rte_eth_dev *dev,
-	      const uint8_t *rss_key, uint32_t rss_key_len,
-	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel __rte_unused)
+struct mlx5_cache_entry *
+mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
+		    struct mlx5_cache_entry *entry __rte_unused,
+		    void *cb_ctx)
 {
+	struct rte_eth_dev *dev = list->ctx;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+	const uint8_t *rss_key = rss_desc->key;
+	uint32_t rss_key_len = rss_desc->key_len;
+	const uint16_t *queues = rss_desc->queue;
+	uint32_t queues_n = rss_desc->queue_num;
 	struct mlx5_hrxq *hrxq = NULL;
 	uint32_t hrxq_idx = 0;
 	struct mlx5_ind_table_obj *ind_tbl;
 	int ret;
 
-	queues_n = hash_fields ? queues_n : 1;
+	queues_n = rss_desc->hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
-	if (!ind_tbl) {
-		rte_errno = ENOMEM;
-		return 0;
-	}
+	if (!ind_tbl)
+		return NULL;
 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
 	if (!hrxq)
 		goto error;
+	hrxq->idx = hrxq_idx;
 	hrxq->ind_table = ind_tbl;
 	hrxq->rss_key_len = rss_key_len;
-	hrxq->hash_fields = hash_fields;
+	hrxq->hash_fields = rss_desc->hash_fields;
 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
-	ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
-	if (ret < 0) {
-		rte_errno = errno;
+	ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
+	if (ret < 0)
 		goto error;
-	}
-	rte_atomic32_inc(&hrxq->refcnt);
-	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
-		     hrxq, next);
-	return hrxq_idx;
+	return &hrxq->entry;
 error:
-	ret = rte_errno; /* Save rte_errno before cleanup. */
 	mlx5_ind_table_obj_release(dev, ind_tbl);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-	rte_errno = ret; /* Restore rte_errno. */
-	return 0;
+	return NULL;
+}
+
+/**
+ * Get an Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param rss_desc
+ *   RSS configuration for the Rx hash queue.
+ *
+ * @return
+ *   A hash Rx queue index on success.
+ */
+uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+		       struct mlx5_flow_rss_desc *rss_desc)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq;
+	struct mlx5_cache_entry *entry;
+	struct mlx5_flow_cb_ctx ctx = {
+		.data = rss_desc,
+	};
+
+	entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+	if (!entry)
+		return 0;
+	hrxq = container_of(entry, typeof(*hrxq), entry);
+	return hrxq->idx;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq_idx
+ *   Index to Hash Rx queue to release.
+ */
+void mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq;
+
+	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
 }
 
 /**
@@ -2087,21 +2095,9 @@ struct mlx5_hrxq *
  *   The number of object not released.
  */
 int
-mlx5_hrxq_verify(struct rte_eth_dev *dev)
+mlx5_hrxq_verify(struct rte_eth_dev *dev __rte_unused)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
-	uint32_t idx;
-	int ret = 0;
-
-	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
-		      hrxq, next) {
-		DRV_LOG(DEBUG,
-			"port %u hash Rx queue %p still referenced",
-			dev->data->port_id, (void *)hrxq);
-		++ret;
-	}
-	return ret;
+	return 0;
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 9ffa028..6f603e2 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -370,17 +370,19 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
 						  uint32_t queues_n);
 int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 			       struct mlx5_ind_table_obj *ind_tbl);
-uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
-		       const uint8_t *rss_key, uint32_t rss_key_len,
-		       uint64_t hash_fields,
-		       const uint16_t *queues, uint32_t queues_n,
-		       int tunnel __rte_unused);
+struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
+		struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
+int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
+		       struct mlx5_cache_entry *entry,
+		       void *cb_ctx);
+void mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
+			 struct mlx5_cache_entry *entry);
 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
-		       const uint8_t *rss_key, uint32_t rss_key_len,
-		       uint64_t hash_fields,
-		       const uint16_t *queues, uint32_t queues_n);
-int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
+		       struct mlx5_flow_rss_desc *rss_desc);
+void mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
 int mlx5_hrxq_verify(struct rte_eth_dev *dev);
+
+
 enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
 void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
-- 
1.8.3.1
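
For reference, below is a minimal sketch of how a flow engine is expected to
resolve a hash Rx queue after this change. It is illustrative only and not
part of the patch; it reuses only the names visible in this series
(mlx5_flow_rss_desc, mlx5_hrxq_get, mlx5_hrxq_release,
MLX5_RSS_HASH_KEY_LEN), and the caller/driver context around it is assumed.

	/* Illustrative sketch only, not part of this patch. */
	static uint32_t
	flow_example_get_hrxq(struct rte_eth_dev *dev,
			      struct mlx5_flow_rss_desc *rss_desc,
			      uint64_t hash_fields, int tunnel)
	{
		/* Fill the descriptor instead of passing loose arguments. */
		rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
		rss_desc->hash_fields = hash_fields;
		rss_desc->tunnel = !!tunnel;
		/*
		 * Lookup, creation on miss, and reference counting are all
		 * handled inside the cache list (mlx5_hrxq_match_cb and
		 * mlx5_hrxq_create_cb), so concurrent callers are safe.
		 */
		return mlx5_hrxq_get(dev, rss_desc); /* 0 on failure. */
	}

When the flow is destroyed, the reference is dropped with
mlx5_hrxq_release(dev, hrxq_idx), and the cache list frees the hash Rx queue
once the last reference goes away.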