From: Rasesh Mody <rasesh.mody@cavium.com>
To: dev@dpdk.org
Cc: Shahed Shaikh <shahed.shaikh@cavium.com>,
ferruh.yigit@intel.com, Dept-EngDPDKDev@cavium.com
Subject: [dpdk-dev] [PATCH 13/17] net/qede: add support for generic flow API
Date: Sat, 8 Sep 2018 13:31:02 -0700 [thread overview]
Message-ID: <1536438666-22184-14-git-send-email-rasesh.mody@cavium.com> (raw)
In-Reply-To: <1536438666-22184-1-git-send-email-rasesh.mody@cavium.com>
From: Shahed Shaikh <shahed.shaikh@cavium.com>
- Add support for rte_flow_validate(), rte_flow_create() and
rte_flow_destroy() APIs
- This patch adds limited support for the flow items
because of the limited filter profiles supported by HW.
- Only the 4-tuple is supported - src and dst IP (v4 or v6) addresses
and src and dst ports of TCP or UDP.
- Also, only the redirect-to-queue action is supported.
Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
---
drivers/net/qede/qede_ethdev.h | 5 +
drivers/net/qede/qede_filter.c | 334 +++++++++++++++++++++++++++++++++++++++-
2 files changed, 338 insertions(+), 1 deletion(-)
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 59828f8..8a9df98 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -184,6 +184,11 @@ struct qede_arfs_entry {
SLIST_ENTRY(qede_arfs_entry) list;
};
+/* Opaque handle for rte flow managed by PMD */
+struct rte_flow {
+ struct qede_arfs_entry entry;
+};
+
struct qede_arfs_info {
struct ecore_arfs_config_params arfs;
uint16_t filter_count;
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index bdf2885..5e6571c 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -8,6 +8,7 @@
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>
+#include <rte_flow_driver.h>
#include "qede_ethdev.h"
@@ -1159,6 +1160,327 @@ static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
return 0;
}
+/* Validate rte_flow attributes for this PMD: only plain ingress rules
+ * are supported - group, priority, egress and transfer attributes are
+ * all rejected.
+ *
+ * Returns 0 on success, -rte_errno on failure with @error populated.
+ */
+static int
+qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (attr == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "NULL attribute");
+ return -rte_errno;
+ }
+
+ if (attr->group != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+
+ if (attr->priority != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+
+ if (attr->egress != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+
+ if (attr->transfer != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
+
+ if (attr->ingress == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Walk the rte_flow item list and, when @flow is non-NULL, translate it
+ * into the PMD arfs tuple stored in flow->entry. HW supports only the
+ * 4-tuple profile (GFT_PROFILE_TYPE_4_TUPLE), so exactly the IPV4/IPV6
+ * and TCP/UDP item types are accepted, items must be exact-match (no
+ * mask, no range via "last"), and both an L3 and an L4 item must be
+ * present. When @flow is NULL the pattern is only validated.
+ *
+ * Returns 0 on success, -rte_errno on failure with @error populated.
+ */
+static int
+qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ bool l3 = false, l4 = false;
+
+ if (pattern == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ "NULL pattern");
+ return -rte_errno;
+ }
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ /* NOTE(review): this rejects any item with a NULL spec,
+ * including RTE_FLOW_ITEM_TYPE_VOID, which the rte_flow API
+ * allows anywhere in a pattern - confirm whether VOID items
+ * should be skipped instead of failing validation.
+ */
+ if (!pattern->spec) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item spec not defined");
+ return -rte_errno;
+ }
+
+ if (pattern->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item last not supported");
+ return -rte_errno;
+ }
+
+ if (pattern->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item mask not supported");
+ return -rte_errno;
+ }
+
+ /* Below validation is only for 4 tuple flow
+ * (GFT_PROFILE_TYPE_4_TUPLE)
+ * - src and dst L3 address (IPv4 or IPv6)
+ * - src and dst L4 port (TCP or UDP)
+ */
+
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = true;
+
+ if (flow) {
+ const struct rte_flow_item_ipv4 *spec;
+
+ /* Addresses are kept in network byte order as
+ * provided by the spec.
+ */
+ spec = pattern->spec;
+ flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
+ flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
+ flow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = true;
+
+ if (flow) {
+ const struct rte_flow_item_ipv6 *spec;
+
+ spec = pattern->spec;
+ rte_memcpy(flow->entry.tuple.src_ipv6,
+ spec->hdr.src_addr,
+ IPV6_ADDR_LEN);
+ rte_memcpy(flow->entry.tuple.dst_ipv6,
+ spec->hdr.dst_addr,
+ IPV6_ADDR_LEN);
+ flow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ l4 = true;
+
+ if (flow) {
+ const struct rte_flow_item_udp *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_port =
+ spec->hdr.src_port;
+ flow->entry.tuple.dst_port =
+ spec->hdr.dst_port;
+ flow->entry.tuple.ip_proto = IPPROTO_UDP;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ l4 = true;
+
+ if (flow) {
+ const struct rte_flow_item_tcp *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_port =
+ spec->hdr.src_port;
+ flow->entry.tuple.dst_port =
+ spec->hdr.dst_port;
+ flow->entry.tuple.ip_proto = IPPROTO_TCP;
+ }
+
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
+ return -rte_errno;
+ }
+ }
+
+ /* The HW profile needs the full 4-tuple: reject patterns that are
+ * missing either the L3 or the L4 item.
+ */
+ if (!(l3 && l4)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item types need to have both L3 and L4 protocols");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Walk the rte_flow action list; only RTE_FLOW_ACTION_TYPE_QUEUE
+ * (redirect to an Rx queue) is supported. When @flow is non-NULL the
+ * validated queue index is recorded in flow->entry.rx_queue.
+ *
+ * Returns 0 on success, -rte_errno on failure with @error populated.
+ */
+static int
+qede_flow_parse_actions(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ const struct rte_flow_action_queue *queue;
+
+ if (actions == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+ "NULL actions");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = actions->conf;
+
+ /* Queue index must be within the configured RSS
+ * queue count.
+ */
+ if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Bad QUEUE action");
+ return -rte_errno;
+ }
+
+ if (flow)
+ flow->entry.rx_queue = queue->index;
+
+ break;
+
+ default:
+ /* NOTE(review): RTE_FLOW_ACTION_TYPE_VOID also lands
+ * here and fails - confirm whether VOID actions should
+ * be ignored per the rte_flow API.
+ */
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Action is not supported - only ACTION_TYPE_QUEUE supported");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+/* Common parse path for validate and create: check the attributes,
+ * then the item pattern, then the actions. When @flow is non-NULL the
+ * pattern and actions are also translated into flow->entry; a NULL
+ * @flow performs a validation-only pass.
+ *
+ * Returns 0 on success, -rte_errno on failure with @error populated.
+ */
+static int
+qede_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ int rc;
+
+ /* Attributes must be acceptable before looking at the lists. */
+ rc = qede_flow_validate_attr(dev, attr, error);
+ if (rc != 0)
+ return rc;
+
+ /* Translate/validate the match items into the arfs tuple. */
+ rc = qede_flow_parse_pattern(dev, patterns, error, flow);
+ if (rc != 0)
+ return rc;
+
+ /* Finally map the action list onto the arfs entry. */
+ return qede_flow_parse_actions(dev, actions, error, flow);
+}
+
+/* rte_flow_ops::validate callback - a dry-run parse: passing a NULL
+ * flow pointer means nothing is translated or programmed.
+ */
+static int
+qede_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
+}
+
+/* rte_flow_ops::create callback - allocate a flow handle, parse the
+ * request into its arfs entry and program the HW filter.
+ *
+ * Returns the flow handle on success; NULL on failure with @error set.
+ * Ownership of the handle passes to the caller, who releases it via
+ * qede_flow_destroy().
+ */
+static struct rte_flow *
+qede_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow = NULL;
+ int rc;
+
+ flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate memory");
+ return NULL;
+ }
+
+ /* Fills flow->entry with the 4-tuple and target Rx queue. */
+ rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
+ if (rc < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+
+ rc = qede_config_arfs_filter(dev, &flow->entry, true);
+ if (rc < 0) {
+ /* qede_config_arfs_filter() returns a negative value, but
+ * rte_flow_error_set() expects a positive errno code.
+ */
+ rte_flow_error_set(error, -rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to configure flow filter");
+ rte_free(flow);
+ return NULL;
+ }
+
+ return flow;
+}
+
+/* rte_flow_ops::destroy callback - remove the HW filter and release
+ * the flow handle.
+ *
+ * Per the rte_flow API, a rule that fails to destroy remains valid, so
+ * the handle must only be freed on success; the original code had this
+ * inverted (freeing on failure and leaking on success).
+ *
+ * Returns 0 on success, negative value on failure with @error set.
+ */
+static int
+qede_flow_destroy(struct rte_eth_dev *eth_dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc = 0;
+
+ rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
+ if (rc < 0) {
+ /* Keep @flow valid so the caller may retry; pass a positive
+ * errno code to rte_flow_error_set().
+ */
+ rte_flow_error_set(error, -rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to delete flow filter");
+ } else {
+ /* Filter removed from HW - release the handle. */
+ rte_free(flow);
+ }
+
+ return rc;
+}
+
+/* Generic flow ops exposed via RTE_ETH_FILTER_GENERIC; only validate,
+ * create and destroy are implemented (no flush/query).
+ */
+const struct rte_flow_ops qede_flow_ops = {
+ .validate = qede_flow_validate,
+ .create = qede_flow_create,
+ .destroy = qede_flow_destroy,
+};
+
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
@@ -1195,6 +1517,17 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
return qede_fdir_filter_conf(eth_dev, filter_op, arg);
case RTE_ETH_FILTER_NTUPLE:
return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_GENERIC:
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+
+ *(const void **)arg = &qede_flow_ops;
+ return 0;
case RTE_ETH_FILTER_MACVLAN:
case RTE_ETH_FILTER_ETHERTYPE:
case RTE_ETH_FILTER_FLEXIBLE:
@@ -1211,4 +1544,3 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
return 0;
}
-/* RTE_FLOW */
--
1.7.10.3
next prev parent reply other threads:[~2018-09-08 20:31 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-09-08 20:30 [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 01/17] net/qede/base: fix to handle stag update event Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 02/17] net/qede/base: add support for OneView APIs Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 03/17] net/qede/base: get pre-negotiated values for stag and bw Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 04/17] net/qede: fix to program HW regs with ether type Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 05/17] net/qede/base: limit number of non ethernet queues to 64 Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 06/17] net/qede/base: correct MCP error handler's log verbosity Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 07/17] net/qede/base: fix logic for sfp get/set Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 08/17] net/qede/base: use trust mode for forced MAC limitations Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 09/17] net/qede/base: use pointer for bytes len read Rasesh Mody
2018-09-08 20:30 ` [dpdk-dev] [PATCH 10/17] net/qede: reorganize filter code Rasesh Mody
2018-09-20 23:51 ` Ferruh Yigit
2018-09-08 20:31 ` [dpdk-dev] [PATCH 11/17] net/qede: fix flow director bug for IPv6 filter Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 12/17] net/qede: refactor fdir code into generic aRFS Rasesh Mody
2018-09-08 20:31 ` Rasesh Mody [this message]
2018-09-08 20:31 ` [dpdk-dev] [PATCH 14/17] net/qede: fix Rx buffer size calculation Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 15/17] net/qede: add support for Rx descriptor status Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 16/17] net/qede/base: fix MFW FLR flow bug Rasesh Mody
2018-09-08 20:31 ` [dpdk-dev] [PATCH 17/17] net/qede: add support for dev reset Rasesh Mody
2018-09-10 5:05 ` [dpdk-dev] [PATCH 00/17] net/qede: add enhancements and fixes David Marchand
2018-09-20 16:17 ` Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1536438666-22184-14-git-send-email-rasesh.mody@cavium.com \
--to=rasesh.mody@cavium.com \
--cc=Dept-EngDPDKDev@cavium.com \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@intel.com \
--cc=shahed.shaikh@cavium.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).