DPDK patches and discussions
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
To: dev@dpdk.org, Yongseok Koh <yskoh@mellanox.com>
Cc: Adrien Mazarguil <adrien.mazarguil@6wind.com>, stable@dpdk.org
Subject: [dpdk-dev] [PATCH v3 2/2] net/mlx5: fix flow director mask
Date: Tue, 17 Apr 2018 11:01:36 +0200
Message-ID: <da73eaf5f007379d425292e61046070304455e79.1523888984.git.nelio.laranjeiro@6wind.com>
In-Reply-To: <cover.1523888984.git.nelio.laranjeiro@6wind.com>

During the rework that re-implemented flow director on top of rte_flow, mask
handling was dropped by mistake: the flow spec was also used as its own mask,
so the masks configured in dev_conf.fdir_conf.mask were ignored.

Fixes: 4c3e9bcdd52e ("net/mlx5: support flow director")
Cc: stable@dpdk.org

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
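As context (not part of the commit), below is a minimal sketch of how an
application supplies the masks this patch now honors through
dev_conf.fdir_conf.mask; the field values, mode and surrounding port setup are
illustrative assumptions, not defaults:

#include <rte_byteorder.h>
#include <rte_ethdev.h>

/* Illustrative application-side configuration: only the fields covered by
 * the mask are matched by the flow director filters added later. */
static const struct rte_eth_conf port_conf = {
	.fdir_conf = {
		.mode = RTE_FDIR_MODE_PERFECT,
		.mask = {
			.ipv4_mask = {
				.src_ip = RTE_BE32(0xffffffff), /* exact source address */
				.dst_ip = RTE_BE32(0xffffff00), /* destination /24 only */
			},
			.src_port_mask = RTE_BE16(0xffff),
			.dst_port_mask = RTE_BE16(0xffff),
		},
	},
};

/* After rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf), filters
 * added through RTE_ETH_FILTER_FDIR are converted by
 * mlx5_fdir_filter_convert(), which now reads these masks instead of reusing
 * the spec as its own mask. */
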
 drivers/net/mlx5/mlx5_flow.c | 56 ++++++++++++++++++++++++++++++------
 1 file changed, 48 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index acaa5f318..7e3bdcc66 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -442,10 +442,18 @@ struct mlx5_fdir {
 		struct rte_flow_item_ipv4 ipv4;
 		struct rte_flow_item_ipv6 ipv6;
 	} l3;
+	union {
+		struct rte_flow_item_ipv4 ipv4;
+		struct rte_flow_item_ipv6 ipv6;
+	} l3_mask;
 	union {
 		struct rte_flow_item_udp udp;
 		struct rte_flow_item_tcp tcp;
 	} l4;
+	union {
+		struct rte_flow_item_udp udp;
+		struct rte_flow_item_tcp tcp;
+	} l4_mask;
 	struct rte_flow_action_queue queue;
 };
 
@@ -2661,6 +2669,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 {
 	struct priv *priv = dev->data->dev_private;
 	const struct rte_eth_fdir_input *input = &fdir_filter->input;
+	const struct rte_eth_fdir_masks *mask =
+		&dev->data->dev_conf.fdir_conf.mask;
 
 	/* Validate queue number. */
 	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
@@ -2707,29 +2717,43 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 			.type_of_service = input->flow.ip4_flow.tos,
 			.next_proto_id = input->flow.ip4_flow.proto,
 		};
+		attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+			.src_addr = mask->ipv4_mask.src_ip,
+			.dst_addr = mask->ipv4_mask.dst_ip,
+			.time_to_live = mask->ipv4_mask.ttl,
+			.type_of_service = mask->ipv4_mask.tos,
+			.next_proto_id = mask->ipv4_mask.proto,
+		};
 		attributes->items[1] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_IPV4,
 			.spec = &attributes->l3,
-			.mask = &attributes->l3,
+			.mask = &attributes->l3_mask,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
 		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
-			.hop_limits = input->flow.udp6_flow.ip.hop_limits,
-			.proto = input->flow.udp6_flow.ip.proto,
+			.hop_limits = input->flow.ipv6_flow.hop_limits,
+			.proto = input->flow.ipv6_flow.proto,
 		};
+
 		memcpy(attributes->l3.ipv6.hdr.src_addr,
 		       input->flow.ipv6_flow.src_ip,
 		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
 		memcpy(attributes->l3.ipv6.hdr.dst_addr,
 		       input->flow.ipv6_flow.dst_ip,
 		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
+		       mask->ipv6_mask.src_ip,
+		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
+		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
+		       mask->ipv6_mask.dst_ip,
+		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
 		attributes->items[1] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
 			.spec = &attributes->l3,
-			.mask = &attributes->l3,
+			.mask = &attributes->l3_mask,
 		};
 		break;
 	default:
@@ -2745,10 +2769,14 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 			.src_port = input->flow.udp4_flow.src_port,
 			.dst_port = input->flow.udp4_flow.dst_port,
 		};
+		attributes->l4_mask.udp.hdr = (struct udp_hdr){
+			.src_port = mask->src_port_mask,
+			.dst_port = mask->dst_port_mask,
+		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_UDP,
 			.spec = &attributes->l4,
-			.mask = &attributes->l4,
+			.mask = &attributes->l4_mask,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
@@ -2756,10 +2784,14 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 			.src_port = input->flow.tcp4_flow.src_port,
 			.dst_port = input->flow.tcp4_flow.dst_port,
 		};
+		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+			.src_port = mask->src_port_mask,
+			.dst_port = mask->dst_port_mask,
+		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_TCP,
 			.spec = &attributes->l4,
-			.mask = &attributes->l4,
+			.mask = &attributes->l4_mask,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
@@ -2767,10 +2799,14 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 			.src_port = input->flow.udp6_flow.src_port,
 			.dst_port = input->flow.udp6_flow.dst_port,
 		};
+		attributes->l4_mask.udp.hdr = (struct udp_hdr){
+			.src_port = mask->src_port_mask,
+			.dst_port = mask->dst_port_mask,
+		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_UDP,
 			.spec = &attributes->l4,
-			.mask = &attributes->l4,
+			.mask = &attributes->l4_mask,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
@@ -2778,10 +2814,14 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 			.src_port = input->flow.tcp6_flow.src_port,
 			.dst_port = input->flow.tcp6_flow.dst_port,
 		};
+		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+			.src_port = mask->src_port_mask,
+			.dst_port = mask->dst_port_mask,
+		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_TCP,
 			.spec = &attributes->l4,
-			.mask = &attributes->l4,
+			.mask = &attributes->l4_mask,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-- 
2.17.0
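
For reference, in rte_flow terms the fix means each pattern item built by
mlx5_fdir_filter_convert() now points at a dedicated mask instead of reusing
its spec. A conceptual sketch of the resulting pattern for an IPv4/UDP filter
(placeholder zero values, not code from the driver):

#include <rte_flow.h>

/* Conceptual sketch only: spec comes from the filter input, mask from
 * dev_conf.fdir_conf.mask -- previously both pointers referenced the spec. */
static const struct rte_flow_item_ipv4 ipv4_spec = { .hdr = { .src_addr = 0 } };
static const struct rte_flow_item_ipv4 ipv4_mask = { .hdr = { .src_addr = 0 } };
static const struct rte_flow_item_udp udp_spec = { .hdr = { .src_port = 0 } };
static const struct rte_flow_item_udp udp_mask = { .hdr = { .src_port = 0 } };

static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec, .mask = &ipv4_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP,  .spec = &udp_spec,  .mask = &udp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};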


Thread overview: 10+ messages
2018-04-12 14:31 [dpdk-dev] [PATCH 0/2] " Nelio Laranjeiro
2018-04-12 14:31 ` [dpdk-dev] [PATCH 1/2] net/mlx5: split L3/L4 in flow director Nelio Laranjeiro
2018-04-12 14:31 ` [dpdk-dev] [PATCH 2/2] net/mlx5: fix flow director mask Nelio Laranjeiro
2018-04-13 15:28 ` [dpdk-dev] [PATCH v2 0/2] " Nelio Laranjeiro
2018-04-17  9:01   ` [dpdk-dev] [PATCH v3 " Nelio Laranjeiro
2018-04-23  5:39     ` Shahaf Shuler
2018-04-17  9:01   ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: split L3/L4 in flow director Nelio Laranjeiro
2018-04-17  9:01   ` Nelio Laranjeiro [this message]
2018-04-13 15:28 ` [dpdk-dev] [PATCH v2 " Nelio Laranjeiro
2018-04-13 15:28 ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: fix flow director mask Nelio Laranjeiro
