From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH 06/37] net/txgbe: add ethertype parse rule
Date: Tue, 3 Nov 2020 18:07:47 +0800
Message-ID: <20201103100818.311881-7-jiawenwu@trustnetic.com>
In-Reply-To: <20201103100818.311881-1-jiawenwu@trustnetic.com>
Add support for parsing flow rules for the ethertype filter.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
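For context (not part of this patch), a minimal sketch of the kind of rule this
parser is meant to accept, built with the generic rte_flow API. The EtherType
0x88F7 and queue index 1 are arbitrary example values, and port_id is assumed
to be an already configured ethdev port:

  #include <rte_byteorder.h>
  #include <rte_flow.h>

  /* Example only: match EtherType 0x88F7 on ingress and steer matching
   * packets to Rx queue 1.
   */
  static struct rte_flow *
  example_ethertype_rule(uint16_t port_id, struct rte_flow_error *err)
  {
  	struct rte_flow_attr attr = { .ingress = 1 };
  	struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x88F7) };
  	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xFFFF) };
  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
  		  .spec = &eth_spec, .mask = &eth_mask },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action_queue queue = { .index = 1 };
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	return rte_flow_create(port_id, &attr, pattern, actions, err);
  }

The same kind of rule can be exercised from testpmd with something like:
  flow create 0 ingress pattern eth type is 0x88F7 / end actions queue index 1 / end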
drivers/net/txgbe/txgbe_flow.c | 250 +++++++++++++++++++++++++++++++++
1 file changed, 250 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 6f8be3b7f..fc2505ddc 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -534,3 +534,253 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Parse the rule to see if it is an ethertype rule,
+ * and get the ethertype filter info if it is.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM      Spec            Mask
+ * ETH       type  0x0807    0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ item = next_no_void_pattern(pattern, NULL);
+ /* The first non-void item should be ETH. */
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+ (!rte_is_zero_ether_addr(&eth_mask->dst) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void action is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* txgbe doesn't support MAC address matching in the ethertype filter. */
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= dev->data->nb_rx_queues) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue index much too big");
+ return -rte_errno;
+ }
+
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
--
2.18.4