DPDK patches and discussions
From: dapengx.yu@intel.com
To: Qiming Yang <qiming.yang@intel.com>, Qi Zhang <qi.z.zhang@intel.com>
Cc: dev@dpdk.org, haiyue.wang@intel.com,
	Dapeng Yu <dapengx.yu@intel.com>,
	stable@dpdk.org
Subject: [dpdk-dev] [PATCH v4 2/2] net/ice: fix flow redirect failure
Date: Thu,  4 Nov 2021 16:45:35 +0800	[thread overview]
Message-ID: <20211104084535.1552536-2-dapengx.yu@intel.com> (raw)
In-Reply-To: <20211104084535.1552536-1-dapengx.yu@intel.com>

From: Dapeng Yu <dapengx.yu@intel.com>

It is possible that a switch rule cannot be redirected successfully
because the kernel driver is busy handling an ongoing VF reset. In
that case the redirect action needs to be deferred to the next
redirect request, which the kernel driver guarantees to issue after
the VF reset is done.

This patch uses the saved flow rule data to replay the switch rule
remove/add during the next flow redirect.
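
For illustration only, a minimal standalone sketch of the retry state
machine this patch introduces (not part of the diff below). The status
names loosely mirror the ICE_SW_FLTR_* values added in patch 1/2 of
this series; the types and the remove_rule()/add_rule() helpers are
simplified stand-ins for ice_rem_adv_rule()/ice_add_adv_rule():

#include <stdbool.h>
#include <stdio.h>

enum sw_fltr_status {
	SW_FLTR_ADDED,                   /* rule is programmed in hardware */
	SW_FLTR_RMV_FAILED_ON_REDIRECT,  /* old rule removal must be retried */
	SW_FLTR_ADD_FAILED_ON_REDIRECT,  /* new rule add must be retried */
};

struct sw_filter_conf {
	enum sw_fltr_status fltr_status;
	int vsi_num;                     /* saved for failure recovery */
};

/* Simplified stand-ins for ice_rem_adv_rule()/ice_add_adv_rule(). */
static bool remove_rule(void) { return true; }
static bool add_rule(void) { return true; }

/*
 * One redirect attempt. On failure, fltr_status records which step
 * to resume from when the next redirect request arrives.
 */
static int redirect(struct sw_filter_conf *conf, int new_vsi_num)
{
	switch (conf->fltr_status) {
	case SW_FLTR_RMV_FAILED_ON_REDIRECT:
		/* the real driver also restores the saved VSI context here */
		/* fall through */
	case SW_FLTR_ADDED:
		if (!remove_rule()) {
			conf->fltr_status = SW_FLTR_RMV_FAILED_ON_REDIRECT;
			return -1;
		}
		/* fall through */
	case SW_FLTR_ADD_FAILED_ON_REDIRECT:
		if (!add_rule()) {
			conf->fltr_status = SW_FLTR_ADD_FAILED_ON_REDIRECT;
			return -1;
		}
		conf->vsi_num = new_vsi_num;   /* remember for recovery */
		conf->fltr_status = SW_FLTR_ADDED;
		return 0;
	}
	return -1;
}

int main(void)
{
	struct sw_filter_conf conf = { SW_FLTR_ADDED, 3 };

	printf("ret=%d status=%d vsi=%d\n",
	       redirect(&conf, 5), conf.fltr_status, conf.vsi_num);
	return 0;
}

Here remove_rule()/add_rule() always succeed; in the driver, a failure
simply leaves fltr_status pointing at the step to retry when the next
redirect request arrives, using the lookup elements and rule info saved
at filter creation by patch 1/2.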

Fixes: 397b4b3c5095 ("net/ice: enable flow redirect on switch")
Cc: stable@dpdk.org

Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
---
V2:
* Add more filter statuses and save the VSI number
V3:
* Use switch statement to make code clear
V4:
* Trim leading space
---
 drivers/net/ice/ice_switch_filter.c | 108 ++++++++++++++++++++--------
 1 file changed, 78 insertions(+), 30 deletions(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index d5add64c53..ed29c00d77 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1926,8 +1926,12 @@ ice_switch_redirect(struct ice_adapter *ad,
 		    struct rte_flow *flow,
 		    struct ice_flow_redirect *rd)
 {
-	struct ice_rule_query_data *rdata = flow->rule;
+	struct ice_rule_query_data *rdata;
+	struct ice_switch_filter_conf *filter_conf_ptr =
+		(struct ice_switch_filter_conf *)flow->rule;
+	struct ice_rule_query_data added_rdata = { 0 };
 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
+	struct ice_adv_lkup_elem *lkups_ref = NULL;
 	struct ice_adv_lkup_elem *lkups_dp = NULL;
 	struct LIST_HEAD_TYPE *list_head;
 	struct ice_adv_rule_info rinfo;
@@ -1936,6 +1940,8 @@ ice_switch_redirect(struct ice_adapter *ad,
 	uint16_t lkups_cnt;
 	int ret;
 
+	rdata = &filter_conf_ptr->sw_query_data;
+
 	if (rdata->vsi_handle != rd->vsi_handle)
 		return 0;
 
@@ -1946,56 +1952,98 @@ ice_switch_redirect(struct ice_adapter *ad,
 	if (rd->type != ICE_FLOW_REDIRECT_VSI)
 		return -ENOTSUP;
 
-	list_head = &sw->recp_list[rdata->rid].filt_rules;
-	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
-			    list_entry) {
-		rinfo = list_itr->rule_info;
-		if ((rinfo.fltr_rule_id == rdata->rule_id &&
-		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
-		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
-		    (rinfo.fltr_rule_id == rdata->rule_id &&
-		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
-			lkups_cnt = list_itr->lkups_cnt;
-			lkups_dp = (struct ice_adv_lkup_elem *)
-				ice_memdup(hw, list_itr->lkups,
-					   sizeof(*list_itr->lkups) *
-					   lkups_cnt, ICE_NONDMA_TO_NONDMA);
-
-			if (!lkups_dp) {
-				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
-				return -EINVAL;
-			}
+	switch (filter_conf_ptr->fltr_status) {
+	case ICE_SW_FLTR_ADDED:
+		list_head = &sw->recp_list[rdata->rid].filt_rules;
+		LIST_FOR_EACH_ENTRY(list_itr, list_head,
+				    ice_adv_fltr_mgmt_list_entry,
+				    list_entry) {
+			rinfo = list_itr->rule_info;
+			if ((rinfo.fltr_rule_id == rdata->rule_id &&
+			    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
+			    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
+			    (rinfo.fltr_rule_id == rdata->rule_id &&
+			    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
+				lkups_cnt = list_itr->lkups_cnt;
+
+				lkups_dp = (struct ice_adv_lkup_elem *)
+					ice_memdup(hw, list_itr->lkups,
+						   sizeof(*list_itr->lkups) *
+						   lkups_cnt,
+						   ICE_NONDMA_TO_NONDMA);
+				if (!lkups_dp) {
+					PMD_DRV_LOG(ERR,
+						    "Failed to allocate memory.");
+					return -EINVAL;
+				}
+				lkups_ref = lkups_dp;
 
-			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
-				rinfo.sw_act.vsi_handle = rd->vsi_handle;
-				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
+				if (rinfo.sw_act.fltr_act ==
+				    ICE_FWD_TO_VSI_LIST) {
+					rinfo.sw_act.vsi_handle =
+						rd->vsi_handle;
+					rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
+				}
+				break;
 			}
-			break;
 		}
-	}
 
-	if (!lkups_dp)
+		if (!lkups_ref)
+			return -EINVAL;
+
+		goto rmv_rule;
+	case ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT:
+		/* Recover VSI context */
+		hw->vsi_ctx[rd->vsi_handle]->vsi_num = filter_conf_ptr->vsi_num;
+		rinfo = filter_conf_ptr->rule_info;
+		lkups_cnt = filter_conf_ptr->lkups_num;
+		lkups_ref = filter_conf_ptr->lkups;
+
+		if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
+			rinfo.sw_act.vsi_handle = rd->vsi_handle;
+			rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
+		}
+
+		goto rmv_rule;
+	case ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT:
+		rinfo = filter_conf_ptr->rule_info;
+		lkups_cnt = filter_conf_ptr->lkups_num;
+		lkups_ref = filter_conf_ptr->lkups;
+
+		goto add_rule;
+	default:
 		return -EINVAL;
+	}
 
+rmv_rule:
 	/* Remove the old rule */
-	ret = ice_rem_adv_rule(hw, list_itr->lkups,
-			       lkups_cnt, &rinfo);
+	ret = ice_rem_adv_rule(hw, lkups_ref, lkups_cnt, &rinfo);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
 			    rdata->rule_id);
+		filter_conf_ptr->fltr_status =
+			ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT;
 		ret = -EINVAL;
 		goto out;
 	}
 
+add_rule:
 	/* Update VSI context */
 	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
 
 	/* Replay the rule */
-	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
-			       &rinfo, rdata);
+	ret = ice_add_adv_rule(hw, lkups_ref, lkups_cnt,
+			       &rinfo, &added_rdata);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "Failed to replay the rule");
+		filter_conf_ptr->fltr_status =
+			ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT;
 		ret = -EINVAL;
+	} else {
+		filter_conf_ptr->sw_query_data = added_rdata;
+		/* Save VSI number for failure recover */
+		filter_conf_ptr->vsi_num = rd->new_vsi_num;
+		filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
 	}
 
 out:
-- 
2.27.0


Thread overview: 14+ messages
2021-11-01  8:45 [dpdk-dev] [PATCH 1/2] net/ice: save meta on switch filter creation dapengx.yu
2021-11-01  8:45 ` [dpdk-dev] [PATCH 2/2] net/ice: fix flow redirect failure dapengx.yu
2021-11-02  0:06 ` [dpdk-dev] [PATCH 1/2] net/ice: save meta on switch filter creation Zhang, Qi Z
2021-11-02  0:08   ` Zhang, Qi Z
2021-11-02 16:11 ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit
2021-11-03  3:25   ` Yu, DapengX
2021-11-03 10:05 ` [dpdk-dev] [PATCH v2 1/2] net/ice: save rule " dapengx.yu
2021-11-03 10:05   ` [dpdk-dev] [PATCH v2 2/2] net/ice: fix flow redirect failure dapengx.yu
2021-11-04  8:17   ` [dpdk-dev] [PATCH v3 1/2] net/ice: save rule on switch filter creation dapengx.yu
2021-11-04  8:17     ` [dpdk-dev] [PATCH v3 2/2] net/ice: fix flow redirect failure dapengx.yu
2021-11-04  8:45     ` [dpdk-dev] [PATCH v4 1/2] net/ice: save rule on switch filter creation dapengx.yu
2021-11-04  8:45       ` dapengx.yu [this message]
2021-11-04 11:12         ` [dpdk-dev] [PATCH v4 2/2] net/ice: fix flow redirect failure Zhang, Qi Z
2021-11-04 11:10       ` [dpdk-dev] [PATCH v4 1/2] net/ice: save rule on switch filter creation Zhang, Qi Z
