From mboxrd@z Thu Jan  1 00:00:00 1970
From: Somnath Kotur <somnath.kotur@broadcom.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com
Date: Tue, 17 Dec 2019 09:47:52 +0530
Message-Id: <20191217041755.29232-4-somnath.kotur@broadcom.com>
In-Reply-To: <20191217041755.29232-1-somnath.kotur@broadcom.com>
References: <20191217041755.29232-1-somnath.kotur@broadcom.com>
Subject: [dpdk-dev] [PATCH 3/6] net/bnxt: fix flow flush to sync with flow destroy routine

Sync the flow flush routine with flow destroy so that the operations
performed per flow during a flush are the same as those done for an
individual flow destroy, by having a common function that both paths
call. One of the things the flow flush routine missed was the deletion
of the L2 filter that would have been created as part of an n-tuple
filter.

Also, decrement the l2_ref_cnt of a filter in the case of a filter
update, as it would have been bumped up previously in
validate_and_parse_flow().

Fixes: a0800839 ("net/bnxt: handle flow flush handling")

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Santoshkumar Karanappa Rastapur
---
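A compilable toy sketch of the shape of this change, for review
context. All names in it (struct port, flow_destroy_one, flow_flush)
are illustrative stand-ins, not the driver's actual types or
functions: a single locked helper owns the per-flow teardown, and both
the destroy and flush entry points go through it, so the two paths
cannot drift apart again.

#include <stddef.h>
#include <stdio.h>

struct flow {
	struct flow *next;
	int filter;			/* 0 means "no filter attached" */
};

struct port {
	struct flow *flow_list;		/* head of the active-flow list */
};

static void flow_lock(struct port *p)   { (void)p; /* take flow lock */ }
static void flow_unlock(struct port *p) { (void)p; /* drop flow lock */ }

/*
 * Common per-flow teardown shared by destroy and flush. Caller holds
 * the flow lock and has already validated flow and flow->filter.
 */
static int flow_destroy_one(struct port *p, struct flow *f)
{
	struct flow **pp;

	/* HW filter cleanup (the EM/ntuple filter plus any parent L2
	 * filter it references) would happen here, in one place. */
	for (pp = &p->flow_list; *pp != NULL; pp = &(*pp)->next) {
		if (*pp == f) {
			*pp = f->next;	/* unlink from the flow list */
			break;
		}
	}
	printf("destroyed flow (filter %d)\n", f->filter);
	return 0;
}

/* Single-flow destroy entry point: validate, lock, call the helper. */
static int flow_destroy(struct port *p, struct flow *f)
{
	int ret;

	flow_lock(p);
	if (f == NULL || f->filter == 0) {	/* invalid handle */
		flow_unlock(p);
		return -1;
	}
	ret = flow_destroy_one(p, f);
	flow_unlock(p);
	return ret;
}

/* Flush entry point: the same helper, applied to every flow. */
static int flow_flush(struct port *p)
{
	int ret = 0;

	flow_lock(p);
	while (p->flow_list != NULL) {
		ret = flow_destroy_one(p, p->flow_list);
		if (ret)
			break;
	}
	flow_unlock(p);
	return ret;
}

int main(void)
{
	struct flow f2 = { NULL, 2 };
	struct flow f1 = { &f2, 1 };
	struct port p = { &f1 };

	flow_destroy(&p, &f1);	/* destroy one flow ... */
	flow_flush(&p);		/* ... then flush the remainder */
	return 0;
}

The patch below applies the same split: _bnxt_flow_destroy() does the
per-flow work, bnxt_flow_destroy() validates and takes the flow lock
around it, and bnxt_flow_flush() invokes it for every flow on each
vNIC.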
 drivers/net/bnxt/bnxt_flow.c | 132 +++++++++++++++----------------------------
 1 file changed, 46 insertions(+), 86 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 4381cd7..59e55c3 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -1537,10 +1537,13 @@ struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
 	 * filter which points to the new destination queue and so we clear
 	 * the previous L2 filter. For ntuple filters, we are going to reuse
 	 * the old L2 filter and create new NTUPLE filter with this new
-	 * destination queue subsequently during bnxt_flow_create.
+	 * destination queue subsequently during bnxt_flow_create. So we
+	 * decrement the ref cnt of the L2 filter that would've been bumped
+	 * up previously in bnxt_validate_and_parse_flow as the old n-tuple
+	 * filter that was referencing it will be deleted now.
 	 */
+	bnxt_hwrm_clear_l2_filter(bp, old_filter);
 	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
-		bnxt_hwrm_clear_l2_filter(bp, old_filter);
 		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
 	} else {
 		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
@@ -1828,46 +1831,24 @@ static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
 }
 
 static int
-bnxt_flow_destroy(struct rte_eth_dev *dev,
-		  struct rte_flow *flow,
-		  struct rte_flow_error *error)
+_bnxt_flow_destroy(struct bnxt *bp,
+		   struct rte_flow *flow,
+		   struct rte_flow_error *error)
 {
-	struct bnxt *bp = dev->data->dev_private;
 	struct bnxt_filter_info *filter;
 	struct bnxt_vnic_info *vnic;
 	int ret = 0;
 
-	bnxt_acquire_flow_lock(bp);
-	if (!flow) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-				   "Invalid flow: failed to destroy flow.");
-		bnxt_release_flow_lock(bp);
-		return -EINVAL;
-	}
-
 	filter = flow->filter;
 	vnic = flow->vnic;
-	if (!filter) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-				   "Invalid flow: failed to destroy flow.");
-		bnxt_release_flow_lock(bp);
-		return -EINVAL;
-	}
-
 	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
 	    filter->enables == filter->tunnel_type) {
-		ret = bnxt_handle_tunnel_redirect_destroy(bp,
-							  filter,
-							  error);
-		if (!ret) {
+		ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
+		if (!ret)
 			goto done;
-		} else {
-			bnxt_release_flow_lock(bp);
+		else
 			return ret;
-		}
 	}
 
 	ret = bnxt_match_filter(bp, filter);
@@ -1919,7 +1900,36 @@ static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
 				   "Failed to destroy flow.");
 	}
 
+	return ret;
+}
+
+static int
+bnxt_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct bnxt *bp = dev->data->dev_private;
+	int ret = 0;
+
+	bnxt_acquire_flow_lock(bp);
+	if (!flow) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Invalid flow: failed to destroy flow.");
+		bnxt_release_flow_lock(bp);
+		return -EINVAL;
+	}
+
+	if (!flow->filter) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Invalid flow: failed to destroy flow.");
+		bnxt_release_flow_lock(bp);
+		return -EINVAL;
+	}
+
+	ret = _bnxt_flow_destroy(bp, flow, error);
 	bnxt_release_flow_lock(bp);
+
 	return ret;
 }
 
@@ -1927,7 +1937,6 @@ static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 {
 	struct bnxt *bp = dev->data->dev_private;
-	struct bnxt_filter_info *filter = NULL;
 	struct bnxt_vnic_info *vnic;
 	struct rte_flow *flow;
 	unsigned int i;
@@ -1941,66 +1950,17 @@ static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
 
 		while (!STAILQ_EMPTY(&vnic->flow_list)) {
 			flow = STAILQ_FIRST(&vnic->flow_list);
-			filter = flow->filter;
-
-			if (filter->filter_type ==
-			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
-			    filter->enables == filter->tunnel_type) {
-				ret =
-				bnxt_handle_tunnel_redirect_destroy(bp,
-								    filter,
-								    error);
-				if (!ret) {
-					goto done;
-				} else {
-					bnxt_release_flow_lock(bp);
-					return ret;
-				}
-			}
-
-			if (filter->filter_type == HWRM_CFA_EM_FILTER)
-				ret = bnxt_hwrm_clear_em_filter(bp, filter);
-			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
-				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-			else if (i)
-				ret = bnxt_hwrm_clear_l2_filter(bp, filter);
-
-			if (ret) {
-				rte_flow_error_set
-					(error,
-					 -ret,
-					 RTE_FLOW_ERROR_TYPE_HANDLE,
-					 NULL,
-					 "Failed to flush flow in HW.");
-				bnxt_release_flow_lock(bp);
-				return -rte_errno;
-			}
-done:
-			STAILQ_REMOVE(&vnic->flow_list, flow,
-				      rte_flow, next);
-
-			STAILQ_REMOVE(&vnic->filter,
-				      filter,
-				      bnxt_filter_info,
-				      next);
-			bnxt_free_filter(bp, filter);
-
-			rte_free(flow);
+			if (!flow->filter)
+				continue;
 
-			/* If this was the last flow associated with this vnic,
-			 * switch the queue back to RSS pool.
-			 */
-			if (STAILQ_EMPTY(&vnic->flow_list)) {
-				rte_free(vnic->fw_grp_ids);
-				if (vnic->rx_queue_cnt > 1)
-					bnxt_hwrm_vnic_ctx_free(bp, vnic);
-				bnxt_hwrm_vnic_free(bp, vnic);
-				vnic->rx_queue_cnt = 0;
-			}
+			ret = _bnxt_flow_destroy(bp, flow, error);
+			if (ret)
+				break;
 		}
 	}
-
 	bnxt_release_flow_lock(bp);
+
 	return ret;
 }
-- 
1.8.3.1