DPDK patches and discussions
 help / color / mirror / Atom feed
From: Andrew Rybchenko <arybchenko@solarflare.com>
To: <dev@dpdk.org>
Cc: Ivan Malov <ivan.malov@oktetlabs.ru>
Subject: [dpdk-dev] [PATCH 6/7] net/sfc: generalise flow start and stop path
Date: Thu, 5 Mar 2020 10:47:52 +0000	[thread overview]
Message-ID: <1583405273-14176-7-git-send-email-arybchenko@solarflare.com> (raw)
In-Reply-To: <1583405273-14176-1-git-send-email-arybchenko@solarflare.com>

From: Ivan Malov <ivan.malov@oktetlabs.ru>

As a preparation step, generalise the flow start and stop path using callbacks.

Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
---
 drivers/net/sfc/sfc_flow.c | 113 ++++++++++++++++++++++++-------------
 drivers/net/sfc/sfc_flow.h |   6 ++
 2 files changed, 81 insertions(+), 38 deletions(-)

diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index ed91c3cef..b95acff31 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -26,12 +26,18 @@
 
 struct sfc_flow_ops_by_spec {
 	sfc_flow_parse_cb_t	*parse;
+	sfc_flow_insert_cb_t	*insert;
+	sfc_flow_remove_cb_t	*remove;
 };
 
 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
+static sfc_flow_insert_cb_t sfc_flow_filter_insert;
+static sfc_flow_remove_cb_t sfc_flow_filter_remove;
 
 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
 	.parse = sfc_flow_parse_rte_to_filter,
+	.insert = sfc_flow_filter_insert,
+	.remove = sfc_flow_filter_remove,
 };
 
 static const struct sfc_flow_ops_by_spec *
@@ -2379,6 +2385,54 @@ sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
 	rte_free(flow);
 }
 
+static int
+sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	const struct sfc_flow_ops_by_spec *ops;
+	int rc;
+
+	ops = sfc_flow_get_ops_by_spec(flow);
+	if (ops == NULL || ops->insert == NULL) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "No backend to handle this flow");
+		return rte_errno;
+	}
+
+	rc = ops->insert(sa, flow);
+	if (rc != 0) {
+		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Failed to insert the flow rule");
+	}
+
+	return rc;
+}
+
+static int
+sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	const struct sfc_flow_ops_by_spec *ops;
+	int rc;
+
+	ops = sfc_flow_get_ops_by_spec(flow);
+	if (ops == NULL || ops->remove == NULL) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "No backend to handle this flow");
+		return rte_errno;
+	}
+
+	rc = ops->remove(sa, flow);
+	if (rc != 0) {
+		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Failed to remove the flow rule");
+	}
+
+	return rc;
+}
+
 static int
 sfc_flow_validate(struct rte_eth_dev *dev,
 		  const struct rte_flow_attr *attr,
@@ -2425,20 +2479,16 @@ sfc_flow_create(struct rte_eth_dev *dev,
 	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
 
 	if (sa->state == SFC_ADAPTER_STARTED) {
-		rc = sfc_flow_filter_insert(sa, flow);
-		if (rc != 0) {
-			rte_flow_error_set(error, rc,
-				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				"Failed to insert filter");
-			goto fail_filter_insert;
-		}
+		rc = sfc_flow_insert(sa, flow, error);
+		if (rc != 0)
+			goto fail_flow_insert;
 	}
 
 	sfc_adapter_unlock(sa);
 
 	return flow;
 
-fail_filter_insert:
+fail_flow_insert:
 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
 
 fail_bad_value:
@@ -2449,29 +2499,6 @@ sfc_flow_create(struct rte_eth_dev *dev,
 	return NULL;
 }
 
-static int
-sfc_flow_remove(struct sfc_adapter *sa,
-		struct rte_flow *flow,
-		struct rte_flow_error *error)
-{
-	int rc = 0;
-
-	SFC_ASSERT(sfc_adapter_is_locked(sa));
-
-	if (sa->state == SFC_ADAPTER_STARTED) {
-		rc = sfc_flow_filter_remove(sa, flow);
-		if (rc != 0)
-			rte_flow_error_set(error, rc,
-				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				"Failed to destroy flow rule");
-	}
-
-	TAILQ_REMOVE(&sa->flow_list, flow, entries);
-	sfc_flow_free(sa, flow);
-
-	return rc;
-}
-
 static int
 sfc_flow_destroy(struct rte_eth_dev *dev,
 		 struct rte_flow *flow,
@@ -2494,7 +2521,11 @@ sfc_flow_destroy(struct rte_eth_dev *dev,
 		goto fail_bad_value;
 	}
 
-	rc = sfc_flow_remove(sa, flow, error);
+	if (sa->state == SFC_ADAPTER_STARTED)
+		rc = sfc_flow_remove(sa, flow, error);
+
+	TAILQ_REMOVE(&sa->flow_list, flow, entries);
+	sfc_flow_free(sa, flow);
 
 fail_bad_value:
 	sfc_adapter_unlock(sa);
@@ -2508,15 +2539,21 @@ sfc_flow_flush(struct rte_eth_dev *dev,
 {
 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	struct rte_flow *flow;
-	int rc = 0;
 	int ret = 0;
 
 	sfc_adapter_lock(sa);
 
 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
-		rc = sfc_flow_remove(sa, flow, error);
-		if (rc != 0)
-			ret = rc;
+		if (sa->state == SFC_ADAPTER_STARTED) {
+			int rc;
+
+			rc = sfc_flow_remove(sa, flow, error);
+			if (rc != 0)
+				ret = rc;
+		}
+
+		TAILQ_REMOVE(&sa->flow_list, flow, entries);
+		sfc_flow_free(sa, flow);
 	}
 
 	sfc_adapter_unlock(sa);
@@ -2583,7 +2620,7 @@ sfc_flow_stop(struct sfc_adapter *sa)
 	SFC_ASSERT(sfc_adapter_is_locked(sa));
 
 	TAILQ_FOREACH(flow, &sa->flow_list, entries)
-		sfc_flow_filter_remove(sa, flow);
+		sfc_flow_remove(sa, flow, NULL);
 }
 
 int
@@ -2597,7 +2634,7 @@ sfc_flow_start(struct sfc_adapter *sa)
 	SFC_ASSERT(sfc_adapter_is_locked(sa));
 
 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
-		rc = sfc_flow_filter_insert(sa, flow);
+		rc = sfc_flow_insert(sa, flow, NULL);
 		if (rc != 0)
 			goto fail_bad_flow;
 	}
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index 19db8fce5..5d87212c1 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -91,6 +91,12 @@ typedef int (sfc_flow_parse_cb_t)(struct rte_eth_dev *dev,
 				  struct rte_flow *flow,
 				  struct rte_flow_error *error);
 
+typedef int (sfc_flow_insert_cb_t)(struct sfc_adapter *sa,
+				   struct rte_flow *flow);
+
+typedef int (sfc_flow_remove_cb_t)(struct sfc_adapter *sa,
+				   struct rte_flow *flow);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.17.1


  parent reply	other threads:[~2020-03-05 10:49 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-03-05 10:47 [dpdk-dev] [PATCH 0/7] net/sfc: prepare rte_flow to have one more backend Andrew Rybchenko
2020-03-05 10:47 ` [dpdk-dev] [PATCH 1/7] net/sfc: make flow RSS details VNIC-specific Andrew Rybchenko
2020-03-05 10:47 ` [dpdk-dev] [PATCH 2/7] net/sfc: make the flow list engine-agnostic Andrew Rybchenko
2020-03-05 10:47 ` [dpdk-dev] [PATCH 3/7] net/sfc: generalise the flow specification structure Andrew Rybchenko
2020-03-05 10:47 ` [dpdk-dev] [PATCH 4/7] net/sfc: introduce flow allocation and free path Andrew Rybchenko
2020-03-05 10:47 ` [dpdk-dev] [PATCH 5/7] net/sfc: generalise flow parsing Andrew Rybchenko
2020-03-05 10:47 ` Andrew Rybchenko [this message]
2020-03-05 10:47 ` [dpdk-dev] [PATCH 7/7] net/sfc: generalise flow pattern item processing Andrew Rybchenko
2020-03-06 16:51 ` [dpdk-dev] [PATCH 0/7] net/sfc: prepare rte_flow to have one more backend Ferruh Yigit

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1583405273-14176-7-git-send-email-arybchenko@solarflare.com \
    --to=arybchenko@solarflare.com \
    --cc=dev@dpdk.org \
    --cc=ivan.malov@oktetlabs.ru \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).