From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 90C64A09E9;
	Tue, 15 Dec 2020 04:46:37 +0100 (CET)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id 8F9081E2F;
	Tue, 15 Dec 2020 04:46:35 +0100 (CET)
Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])
	by dpdk.org (Postfix) with ESMTP id A3381A3
	for <dev@dpdk.org>; Tue, 15 Dec 2020 04:46:33 +0100 (CET)
Received: from Internal Mail-Server by MTLPINE1 (envelope-from
	suanmingm@nvidia.com) with SMTP; 15 Dec 2020 05:46:29 +0200
Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9])
	by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 0BF3kRtB028703;
	Tue, 15 Dec 2020 05:46:27 +0200
From: Suanming Mou <suanmingm@nvidia.com>
To: viacheslavo@nvidia.com, matan@nvidia.com
Cc: rasland@nvidia.com, dev@dpdk.org, stable@dpdk.org
Date: Tue, 15 Dec 2020 11:46:24 +0800
Message-Id: <1608003984-268333-1-git-send-email-suanmingm@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
Subject: [dpdk-dev] [PATCH] net/mlx5: fix shared RSS and mark actions
	combination
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

In order to allow mbuf mark ID updates in the Rx data-path, the PMD
has a mechanism to enable mark reporting according to the configured
rte_flows: when a flow with a mark ID and an RSS/QUEUE action exists,
all the relevant Rx queues are enabled to report the mark ID.

When a shared RSS action is combined with a mark action, this
mechanism misses the Rx queue updates. This commit handles the shared
RSS case in the mechanism as well.

Fixes: e1592b6 ("net/mlx5: make Rx queue thread safe")
Cc: stable@dpdk.org

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 52 ++++++++++++++++------
 drivers/net/mlx5/mlx5_flow_dv.c | 95 ++++++++++++++++-------------------------
 2 files changed, 75 insertions(+), 72 deletions(-)
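
Note for reviewers: the mlx5_flow.c hunks below make the mark-flag
walk resolve its indirection table from either fate action before
touching the Rx queues. What follows is a minimal, self-contained
sketch of that dispatch; every type and name in it is a simplified
stand-in for the driver's internals (mlx5_ind_table_obj, mlx5_hrxq,
mlx5_shared_action_rss, mlx5_flow_handle), not the real definitions.

/*
 * Toy model of the fixed lookup: resolve the indirection table from
 * either a regular hash Rx queue (QUEUE fate) or a shared RSS action
 * (SHARED_RSS fate), then flag every queue it spans.
 */
#include <stdbool.h>
#include <stdint.h>

enum fate_action { FATE_QUEUE, FATE_SHARED_RSS, FATE_DROP };

struct ind_table {                /* stand-in for mlx5_ind_table_obj */
	unsigned int queues_n;
	const uint16_t *queues;
};

struct hrxq {                     /* stand-in for mlx5_hrxq */
	struct ind_table *ind_table;
};

struct shared_rss {               /* stand-in for mlx5_shared_action_rss */
	struct ind_table *ind_tbl;
};

struct flow_handle {              /* stand-in for mlx5_flow_handle */
	enum fate_action fate_action;
	struct hrxq *hrxq;        /* valid for FATE_QUEUE */
	struct shared_rss *srss;  /* valid for FATE_SHARED_RSS */
};

/* Flag every Rx queue reached by the handle so it reports mark IDs. */
static void
rxq_mark_flags_set(bool *rxq_mark, const struct flow_handle *dh)
{
	struct ind_table *ind_tbl = NULL;
	unsigned int i;

	if (dh->fate_action == FATE_QUEUE) {
		if (dh->hrxq)
			ind_tbl = dh->hrxq->ind_table;
	} else if (dh->fate_action == FATE_SHARED_RSS) {
		/* The branch the pre-patch code was missing. */
		if (dh->srss)
			ind_tbl = dh->srss->ind_tbl;
	}
	if (!ind_tbl)
		return;
	for (i = 0; i != ind_tbl->queues_n; ++i)
		rxq_mark[ind_tbl->queues[i]] = true;
}

int main(void)
{
	const uint16_t queues[] = { 0, 2 };
	struct ind_table tbl = { 2, queues };
	struct shared_rss srss = { &tbl };
	struct flow_handle dh = { FATE_SHARED_RSS, NULL, &srss };
	bool rxq_mark[4] = { false };

	rxq_mark_flags_set(rxq_mark, &dh);	/* queues 0 and 2 flagged */
	return 0;
}

Before the patch, the early return on a non-QUEUE fate meant no queue
behind a shared RSS action ever learned it had to report mark IDs.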
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 82e24d7..40f8928 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1001,17 +1001,29 @@ struct mlx5_flow_tunnel_info {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-	struct mlx5_hrxq *hrxq;
+	struct mlx5_ind_table_obj *ind_tbl = NULL;
 	unsigned int i;
 
-	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-		return;
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+		struct mlx5_hrxq *hrxq;
+
+		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 			      dev_handle->rix_hrxq);
-	if (!hrxq)
+		if (hrxq)
+			ind_tbl = hrxq->ind_table;
+	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+		struct mlx5_shared_action_rss *shared_rss;
+
+		shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+			 dev_handle->rix_srss);
+		if (shared_rss)
+			ind_tbl = shared_rss->ind_tbl;
+	}
+	if (!ind_tbl)
 		return;
-	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-		int idx = hrxq->ind_table->queues[i];
+	for (i = 0; i != ind_tbl->queues_n; ++i) {
+		int idx = ind_tbl->queues[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
@@ -1083,18 +1095,30 @@ struct mlx5_flow_tunnel_info {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-	struct mlx5_hrxq *hrxq;
+	struct mlx5_ind_table_obj *ind_tbl = NULL;
 	unsigned int i;
 
-	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-		return;
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+		struct mlx5_hrxq *hrxq;
+
+		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 			      dev_handle->rix_hrxq);
-	if (!hrxq)
+		if (hrxq)
+			ind_tbl = hrxq->ind_table;
+	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+		struct mlx5_shared_action_rss *shared_rss;
+
+		shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+			 dev_handle->rix_srss);
+		if (shared_rss)
+			ind_tbl = shared_rss->ind_tbl;
+	}
+	if (!ind_tbl)
 		return;
 	MLX5_ASSERT(dev->data->dev_started);
-	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-		int idx = hrxq->ind_table->queues[i];
+	for (i = 0; i != ind_tbl->queues_n; ++i) {
+		int idx = ind_tbl->queues[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
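
Note for reviewers: the mlx5_flow_dv.c part below removes the
__flow_dv_rss_get_hrxq() wrapper and open-codes its two cases in the
apply loop, so only a QUEUE fate owns a hash Rx queue reference
(rix_hrxq), while a SHARED_RSS fate borrows the queue owned by the
shared action and records the action index (rix_srss). A compressed,
self-contained sketch of that split; the types and the two lookup
helpers are toy stand-ins for the driver API, not the real functions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum fate { FATE_QUEUE, FATE_SHARED_RSS };

struct handle {
	enum fate fate;
	uint32_t rix_hrxq;	/* owned hrxq index, QUEUE fate only */
	uint32_t rix_srss;	/* borrowed shared RSS action index */
};

/* Toy stand-ins for flow_dv_hrxq_prepare() and
 * __flow_dv_action_rss_hrxq_lookup(). */
static uint32_t toy_prepare_hrxq(void) { return 7; }
static uint32_t toy_lookup_shared_hrxq(uint32_t srss) { return srss ? 9 : 0; }

static int
apply_fate(struct handle *dh, uint32_t shared_rss)
{
	if (dh->fate == FATE_QUEUE) {
		/* QUEUE: create (or look up) and own the hrxq. */
		dh->rix_hrxq = toy_prepare_hrxq();
		return dh->rix_hrxq ? 0 : -ENOENT;
	}
	/* SHARED_RSS: look up only, never create; remember the action. */
	if (!toy_lookup_shared_hrxq(shared_rss))
		return -ENOENT;
	dh->rix_srss = shared_rss;
	return 0;
}

int main(void)
{
	struct handle dh = { .fate = FATE_SHARED_RSS };

	if (apply_fate(&dh, 3) == 0)
		printf("handle borrows shared RSS action %u\n", dh.rix_srss);
	return 0;
}

Recording rix_srss on every shared-RSS handle inside the loop,
instead of only on the last handle as the removed
wks->flows[wks->flow_idx - 1] code did, is what lets the rxq-flags
walk in mlx5_flow.c find the shared action from any handle.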
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index c317376..18a7d03 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10678,47 +10678,6 @@ struct mlx5_cache_entry *
 }
 
 /**
- * Retrieves hash RX queue suitable for the *flow*.
- * If shared action configured for *flow* suitable hash RX queue will be
- * retrieved from attached shared action.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to the sub flow.
- * @param[in] rss_desc
- *   Pointer to the RSS descriptor.
- * @param[out] hrxq
- *   Pointer to retrieved hash RX queue object.
- *
- * @return
- *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
- */
-static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
-		       struct mlx5_flow_rss_desc *rss_desc,
-		       struct mlx5_hrxq **hrxq)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t hrxq_idx;
-
-	if (rss_desc->shared_rss) {
-		hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-				(dev, rss_desc->shared_rss,
-				 dev_flow->hash_fields,
-				 !!(dev_flow->handle->layers &
-				    MLX5_FLOW_LAYER_TUNNEL));
-		if (hrxq_idx)
-			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-					       hrxq_idx);
-	} else {
-		*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
-					     &hrxq_idx);
-	}
-	return hrxq_idx;
-}
-
-/**
  * Apply the flow to the NIC, lock free,
  * (mutex should be acquired by caller).
  *
@@ -10749,11 +10708,6 @@ struct mlx5_cache_entry *
 	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
 	MLX5_ASSERT(wks);
-	if (rss_desc->shared_rss) {
-		dh = wks->flows[wks->flow_idx - 1].handle;
-		MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
-		dh->rix_srss = rss_desc->shared_rss;
-	}
 	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
 		dev_flow = &wks->flows[idx];
 		dv = &dev_flow->dv;
@@ -10769,11 +10723,34 @@ struct mlx5_cache_entry *
 					priv->drop_queue.hrxq->action;
 			}
 		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-			    !dv_h->rix_sample && !dv_h->rix_dest_array) ||
-			    (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
+			    !dv_h->rix_sample && !dv_h->rix_dest_array)) {
+			struct mlx5_hrxq *hrxq;
+			uint32_t hrxq_idx;
+
+			hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+						    &hrxq_idx);
+			if (!hrxq) {
+				rte_flow_error_set
+					(error, rte_errno,
+					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					 "cannot get hash queue");
+				goto error;
+			}
+			dh->rix_hrxq = hrxq_idx;
+			dv->actions[n++] = hrxq->action;
+		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
 			struct mlx5_hrxq *hrxq = NULL;
-			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
-					(dev, dev_flow, rss_desc, &hrxq);
+			uint32_t hrxq_idx;
+
+			hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
+						rss_desc->shared_rss,
+						dev_flow->hash_fields,
+						!!(dh->layers &
+						   MLX5_FLOW_LAYER_TUNNEL));
+			if (hrxq_idx)
+				hrxq = mlx5_ipool_get
+					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+					 hrxq_idx);
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -10781,8 +10758,7 @@ struct mlx5_cache_entry *
 					 "cannot get hash queue");
 				goto error;
 			}
-			if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
-				dh->rix_hrxq = hrxq_idx;
+			dh->rix_srss = rss_desc->shared_rss;
 			dv->actions[n++] = hrxq->action;
 		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
 			if (!priv->sh->default_miss_action) {
@@ -10824,12 +10800,12 @@ struct mlx5_cache_entry *
 		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
 			mlx5_hrxq_release(dev, dh->rix_hrxq);
 			dh->rix_hrxq = 0;
+		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+			dh->rix_srss = 0;
 		}
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
-	if (rss_desc->shared_rss)
-		wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -11097,9 +11073,6 @@ struct mlx5_cache_entry *
 		flow_dv_port_id_action_resource_release(dev,
 				handle->rix_port_id_action);
 		break;
-	case MLX5_FLOW_FATE_SHARED_RSS:
-		flow_dv_shared_rss_action_release(dev, handle->rix_srss);
-		break;
 	default:
 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
 		break;
@@ -11262,6 +11235,7 @@ struct mlx5_cache_entry *
 {
 	struct mlx5_flow_handle *dev_handle;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t srss = 0;
 
 	if (!flow)
 		return;
@@ -11306,10 +11280,15 @@ struct mlx5_cache_entry *
 		if (dev_handle->dvh.rix_tag)
 			flow_dv_tag_release(dev,
 					    dev_handle->dvh.rix_tag);
-		flow_dv_fate_resource_release(dev, dev_handle);
+		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
+			flow_dv_fate_resource_release(dev, dev_handle);
+		else if (!srss)
+			srss = dev_handle->rix_srss;
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
 				tmp_idx);
 	}
+	if (srss)
+		flow_dv_shared_rss_action_release(dev, srss);
 }
 
 /**
-- 
1.8.3.1
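
Note for reviewers: the flow destroy part of the change is why
MLX5_FLOW_FATE_SHARED_RSS disappears from
flow_dv_fate_resource_release(): now that every shared-RSS handle
carries rix_srss, releasing the action per handle would drop more
references than the flow ever took, so the destroy loop captures the
action index once and drops a single reference after all handles are
freed. A toy sketch of that pattern, with simplified stand-in types
rather than the driver's:

#include <stdint.h>
#include <stdio.h>

enum fate { FATE_QUEUE, FATE_SHARED_RSS };

struct handle {
	enum fate fate;
	uint32_t rix_srss;
	struct handle *next;
};

static void release_fate_resource(struct handle *dh)
{
	printf("release per-handle resource (fate %d)\n", dh->fate);
}

static void shared_rss_release(uint32_t srss)
{
	printf("drop one reference on shared RSS action %u\n", srss);
}

static void flow_destroy(struct handle *handles)
{
	uint32_t srss = 0;
	struct handle *dh;

	for (dh = handles; dh != NULL; dh = dh->next) {
		if (dh->fate != FATE_SHARED_RSS)
			release_fate_resource(dh);
		else if (!srss)
			srss = dh->rix_srss;	/* remember, release later */
	}
	if (srss)
		shared_rss_release(srss);	/* exactly one dereference */
}

int main(void)
{
	struct handle h2 = { FATE_SHARED_RSS, 3, NULL };
	struct handle h1 = { FATE_SHARED_RSS, 3, &h2 };

	flow_destroy(&h1);	/* prints a single shared RSS release */
	return 0;
}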