* [dpdk-dev] [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel
@ 2018-12-05 11:13 subarna.kar
2018-12-06 11:44 ` Zhang, Qi Z
From: subarna.kar @ 2018-12-05 11:13 UTC
To: dev; +Cc: qi.z.zhang, jingjing.wu, Subarna Kar
From: Subarna Kar <subarna.kar@intel.com>
---
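Usage sketch (illustration only, not part of the diff): with the new
pattern_vxlan_5 pattern added below, a rule matching the outer IPv4
destination, the VXLAN VNI and the inner IPv4/UDP flow could be created
roughly as follows from application code that includes rte_flow.h and
rte_ip.h. The port id, queue index, addresses, port and VNI are
placeholder values, not values taken from this patch:

	/* sketch: match outer IPv4 dst + VNI + inner IPv4/UDP, steer to queue 1 */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error flow_err;
	/* outer IPv4: the tunnel parser below accepts only the dst address */
	struct rte_flow_item_ipv4 outer_ip = {
		.hdr = { .dst_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 1)) },
	};
	struct rte_flow_item_ipv4 outer_ip_mask = {
		.hdr = { .dst_addr = UINT32_MAX },
	};
	/* VXLAN: the parser expects the full 24-bit VNI to be masked */
	struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
	/* inner IPv4: src and dst addresses */
	struct rte_flow_item_ipv4 inner_ip = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
			.dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 2)),
		},
	};
	struct rte_flow_item_ipv4 inner_ip_mask = {
		.hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
	};
	/* inner UDP: dst port */
	struct rte_flow_item_udp inner_udp = {
		.hdr = { .dst_port = rte_cpu_to_be_16(5000) },
	};
	struct rte_flow_item_udp inner_udp_mask = {
		.hdr = { .dst_port = UINT16_MAX },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &outer_ip, .mask = &outer_ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &inner_ip, .mask = &inner_ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &inner_udp, .mask = &inner_udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow =
		rte_flow_create(0, &attr, pattern, actions, &flow_err);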
drivers/net/i40e/i40e_ethdev.c | 14 +-
drivers/net/i40e/i40e_ethdev.h | 109 +++++-
drivers/net/i40e/i40e_fdir.c | 392 +++++++++++++++++++--
drivers/net/i40e/i40e_flow.c | 781 ++++++++++++++++++++++++++++++++++++++++-
4 files changed, 1250 insertions(+), 46 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7030eb1..0e9f22d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1788,6 +1788,7 @@ i40e_dev_configure(struct rte_eth_dev *dev)
* legacy filter API is deprecated, the following codes should also be
* removed.
*/
+ dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
ret = i40e_fdir_setup(pf);
if (ret != I40E_SUCCESS) {
@@ -9055,7 +9056,11 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
- I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ I40E_INSET_IPV4_PROTO | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_TUNNEL_IPV4_DST |
+ I40E_INSET_TUNNEL_DST_PORT |
+ I40E_INSET_TUNNEL_IPV6_DST,
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
@@ -9096,7 +9101,10 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
- I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_TUNNEL_ID | I40E_INSET_TUNNEL_DST_PORT |
+ I40E_INSET_TUNNEL_IPV4_DST | I40E_INSET_TUNNEL_IPV6_DST,
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
@@ -12384,7 +12392,6 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t buff_size;
uint32_t i;
int ret;
-
if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
PMD_DRV_LOG(ERR, "Unsupported operation.");
@@ -12403,7 +12410,6 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
PMD_DRV_LOG(INFO, "No new protocol added");
return;
}
-
buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
proto = rte_zmalloc("new_proto", buff_size, 0);
if (!proto) {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 11ecfc3..f7311ce 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -466,6 +466,92 @@ struct i40e_vmdq_info {
#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
#define I40E_FDIR_IPv6_TC_OFFSET 20
+/* A structure used to define input for MPLSoUDP flow */
+struct i40e_mplsoudpv4_udpv4_flow {
+ struct rte_eth_udpv4_flow outer_udp; //outer IPv4
+ struct rte_flow_item_mpls mpls;
+ struct rte_eth_udpv4_flow inner_udp; //inner IPv4
+};
+
+struct i40e_mplsoudpv4_udpv6_flow {
+ struct rte_eth_udpv4_flow outer_udp; //outer IPv4
+ struct rte_flow_item_mpls mpls;
+ struct rte_eth_udpv6_flow inner6_udp; //inner IPv6
+};
+
+struct i40e_mplsoudpv6_udpv4_flow {
+ struct rte_eth_udpv6_flow outer6_udp; //outer IPv6
+ struct rte_flow_item_mpls mpls;
+ struct rte_eth_udpv4_flow inner_udp; //inner IPv4
+};
+
+struct i40e_mplsoudpv6_udpv6_flow {
+ struct rte_eth_udpv6_flow outer6_udp; //outer IPv6
+ struct rte_flow_item_mpls mpls;
+ struct rte_eth_udpv6_flow inner6_udp; //inner IPv6
+};
+
+/* A structure used to define input for VXLAN flow */
+struct i40e_vxlanv4_udpv4_flow {
+ struct rte_eth_udpv4_flow outer_udp; //outer IPv4
+ struct rte_flow_item_vxlan vxlan;
+ struct rte_eth_udpv4_flow inner_udp; //inner IPv4
+};
+
+struct i40e_vxlanv4_udpv6_flow {
+ struct rte_eth_udpv4_flow outer_udp; //outer IPv4
+ struct rte_flow_item_vxlan vxlan;
+ struct rte_eth_udpv6_flow inner6_udp; //inner IPv6
+};
+
+struct i40e_vxlanv6_udpv4_flow {
+ struct rte_eth_udpv6_flow outer6_udp; //outer IPv6
+ struct rte_flow_item_vxlan vxlan;
+ struct rte_eth_udpv4_flow inner_udp; //inner IPv4
+};
+
+struct i40e_vxlanv6_udpv6_flow {
+ struct rte_eth_udpv6_flow outer6_udp; //outer IPv6
+ struct rte_flow_item_vxlan vxlan;
+ struct rte_eth_udpv6_flow inner6_udp; //inner IPv6
+};
+
+/* A structure used to define input for MPLSoGRE flow */
+
+struct i40e_gre_flow {
+ struct rte_eth_ipv4_flow ip;
+ struct rte_flow_item_gre gre;
+};
+
+struct i40e_mplsogrev4_udpv4_flow {
+ struct i40e_gre_flow outer_gre; //outer IPv4
+ struct rte_flow_item_mpls mpls;
+ struct rte_eth_udpv4_flow inner_udp; //inner IPv4
+};
+
+struct i40e_mplsogrev4_udpv6_flow {
+ struct i40e_gre_flow outer_gre; //outer IPv4
+ struct rte_flow_item_mpls mpls;
+ struct rte_eth_udpv6_flow inner6_udp; //inner IPv6
+};
+
+struct i40e_gre_ipv6_flow {
+ struct rte_eth_ipv6_flow ip;
+ struct rte_flow_item_gre gre;
+};
+
+struct i40e_mplsogrev6_udpv4_flow {
+ struct i40e_gre_ipv6_flow outer6_gre; //outer IPv6
+ struct rte_flow_item_mpls mpls;
+ struct rte_eth_udpv4_flow inner_udp; //inner IPv4
+};
+
+struct i40e_mplsogrev6_udpv6_flow {
+ struct i40e_gre_ipv6_flow outer6_gre; //outer IPv6
+ struct rte_flow_item_mpls mpls;
+ struct rte_eth_udpv6_flow inner6_udp; //inner IPv6
+};
+
/* A structure used to define the input for GTP flow */
struct i40e_gtp_flow {
struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */
@@ -510,6 +596,19 @@ union i40e_fdir_flow {
struct i40e_gtp_ipv4_flow gtp_ipv4_flow;
struct i40e_gtp_ipv6_flow gtp_ipv6_flow;
struct i40e_raw_flow raw_flow;
+ //this is for tunneled pkts to match on outer and inner
+ struct i40e_mplsoudpv4_udpv4_flow mplsudp4_flow;
+ struct i40e_mplsoudpv4_udpv6_flow mplsudp6_flow;
+ struct i40e_mplsoudpv6_udpv4_flow mpls6udp4_flow;
+ struct i40e_mplsoudpv6_udpv6_flow mpls6udp6_flow;
+ struct i40e_mplsogrev4_udpv4_flow mplsgre4_flow;
+ struct i40e_mplsogrev4_udpv6_flow mplsgre6_flow;
+ struct i40e_mplsogrev6_udpv4_flow mpls6gre4_flow;
+ struct i40e_mplsogrev6_udpv6_flow mpls6gre6_flow;
+ struct i40e_vxlanv4_udpv4_flow vxlanudp4_flow;
+ struct i40e_vxlanv4_udpv6_flow vxlanudp6_flow;
+ struct i40e_vxlanv6_udpv4_flow vxlan6udp4_flow;
+ struct i40e_vxlanv6_udpv6_flow vxlan6udp6_flow;
};
enum i40e_fdir_ip_type {
@@ -517,6 +616,12 @@ enum i40e_fdir_ip_type {
I40E_FDIR_IPTYPE_IPV6,
};
+enum i40e_fdir_tunnel_type {
+ MPLSOUDP,
+ MPLSOGRE,
+ VXLAN,
+};
+
/* A structure used to contain extend input of flow */
struct i40e_fdir_flow_ext {
uint16_t vlan_tci;
@@ -528,6 +633,8 @@ struct i40e_fdir_flow_ext {
enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */
bool customized_pctype; /* If customized pctype is used */
bool pkt_template; /* If raw packet template is used */
+ bool tunnel; /* if it's a tunnel pkt, then allow inner headers */
+ enum i40e_fdir_tunnel_type tunnel_val;
};
/* A structure used to define the input for a flow director filter entry */
@@ -748,8 +855,8 @@ enum i40e_tunnel_type {
I40E_TUNNEL_TYPE_NVGRE,
I40E_TUNNEL_TYPE_IP_IN_GRE,
I40E_L2_TUNNEL_TYPE_E_TAG,
- I40E_TUNNEL_TYPE_MPLSoUDP,
I40E_TUNNEL_TYPE_MPLSoGRE,
+ I40E_TUNNEL_TYPE_MPLSoUDP,
I40E_TUNNEL_TYPE_QINQ,
I40E_TUNNEL_TYPE_GTPC,
I40E_TUNNEL_TYPE_GTPU,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index d41601a..253ea56 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -35,6 +35,7 @@
#define I40E_FDIR_PKT_LEN 512
#define I40E_FDIR_IP_DEFAULT_LEN 420
+#define I40E_FDIR_IP_DEFAULT_LEN_INNER 370
#define I40E_FDIR_IP_DEFAULT_TTL 0x40
#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
@@ -43,6 +44,7 @@
#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
#define I40E_FDIR_UDP_DEFAULT_LEN 400
+#define I40E_FDIR_UDP_DEFAULT_LEN_INNER 350
#define I40E_FDIR_GTP_DEFAULT_LEN 384
#define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
#define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
@@ -53,7 +55,7 @@
#define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
#define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
#define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
-
+#define VXLAN_DEFAULT_FLAG 0x08
/* Wait time for fdir filter programming */
#define I40E_FDIR_MAX_WAIT_US 10000
@@ -951,15 +953,138 @@ i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
}
static inline int
+i40e_flow_fdir_fill_outer_header(const struct i40e_fdir_input *fdir_input,
+ unsigned char *raw_pkt)
+{
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ struct udp_hdr *udp;
+ struct rte_flow_item_gre *gre;
+ struct rte_flow_item_mpls *mpls;
+ struct rte_flow_item_vxlan *vxlan;
+ uint16_t *ether_type;
+ uint8_t len = 2 * sizeof(struct ether_addr);
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+
+ //we don't need ETH, so direct skip to IP
+ if (fdir_input->flow_ext.iip_type == I40E_FDIR_IPTYPE_IPV4) {
+ ip = (struct ipv4_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl =
+ I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+ /* set len to by default */
+ ip->total_length =
+ rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+ ip->time_to_live = I40E_FDIR_IP_DEFAULT_TTL;
+ ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+ ip->src_addr = fdir_input->flow.ip4_flow.src_ip;
+ ip->dst_addr = fdir_input->flow.ip4_flow.dst_ip;
+ if (fdir_input->flow_ext.tunnel_val == MPLSOGRE)
+ ip->next_proto_id = IPPROTO_GRE;
+ else
+ ip->next_proto_id = IPPROTO_UDP;
+ len += sizeof(struct ipv4_hdr);
+ raw_pkt += sizeof(struct ipv4_hdr);
+ }
+ if (fdir_input->flow_ext.iip_type == I40E_FDIR_IPTYPE_IPV6) {
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW);
+ ip6->payload_len =
+ rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ ip6->hop_limits =
+ I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+ ip6->proto =
+ fdir_input->flow.mpls6udp4_flow.outer6_udp.ip.proto;
+ rte_memcpy(&ip6->src_addr,
+ &fdir_input->flow.mpls6udp4_flow.outer6_udp.ip.src_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr,
+ &fdir_input->flow.mpls6udp4_flow.outer6_udp.ip.dst_ip,
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+ raw_pkt += sizeof(struct ipv6_hdr);
+ }
+ if (fdir_input->flow_ext.tunnel_val == MPLSOGRE) {
+ gre = (struct rte_flow_item_gre *)raw_pkt;
+ gre->protocol =
+ fdir_input->flow.mplsgre4_flow.outer_gre.gre.protocol;
+ len += sizeof(struct rte_flow_item_gre);
+ raw_pkt += sizeof(struct rte_flow_item_gre);
+ } else {//now put UDP info for VXLAN and MPLSoUDP
+ udp = (struct udp_hdr *)raw_pkt;
+ if (fdir_input->flow_ext.iip_type == I40E_FDIR_IPTYPE_IPV4) {
+ udp->src_port =
+ fdir_input->flow.mplsudp4_flow.outer_udp.src_port;
+ udp->dst_port =
+ fdir_input->flow.mplsudp4_flow.outer_udp.dst_port;
+ } else {
+ udp->src_port =
+ fdir_input->flow.mpls6udp4_flow.outer6_udp.src_port;
+ udp->dst_port =
+ fdir_input->flow.mpls6udp4_flow.outer6_udp.dst_port;
+ }
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+ len += sizeof(struct udp_hdr);
+ raw_pkt += sizeof(struct udp_hdr);
+ }
+ if (fdir_input->flow_ext.tunnel_val == VXLAN) {
+ vxlan = (struct rte_flow_item_vxlan *)raw_pkt;
+ if (fdir_input->flow_ext.iip_type == I40E_FDIR_IPTYPE_IPV4) {
+ vxlan->vni[0] =
+ fdir_input->flow.vxlanudp4_flow.vxlan.vni[0];
+ vxlan->vni[1] =
+ fdir_input->flow.vxlanudp4_flow.vxlan.vni[1];
+ vxlan->vni[2] =
+ fdir_input->flow.vxlanudp4_flow.vxlan.vni[2];
+ } else {
+ vxlan->vni[0] =
+ fdir_input->flow.vxlan6udp4_flow.vxlan.vni[0];
+ vxlan->vni[1] =
+ fdir_input->flow.vxlan6udp4_flow.vxlan.vni[1];
+ vxlan->vni[2] =
+ fdir_input->flow.vxlan6udp4_flow.vxlan.vni[2];
+ }
+ vxlan->flags = VXLAN_DEFAULT_FLAG;
+ len += sizeof(struct rte_flow_item_vxlan);
+ } else {
+ //now put MPLS info for MPLSoUDP and MPLSoGRE
+ mpls = (struct rte_flow_item_mpls *)raw_pkt;
+ if (fdir_input->flow_ext.iip_type == I40E_FDIR_IPTYPE_IPV4) {
+ mpls->label_tc_s[0] =
+ fdir_input->flow.mplsudp4_flow.mpls.label_tc_s[0];
+ mpls->label_tc_s[1] =
+ fdir_input->flow.mplsudp4_flow.mpls.label_tc_s[1];
+ mpls->label_tc_s[2] =
+ fdir_input->flow.mplsudp4_flow.mpls.label_tc_s[2];
+ } else {
+ mpls->label_tc_s[0] =
+ fdir_input->flow.mpls6udp4_flow.mpls.label_tc_s[0];
+ mpls->label_tc_s[1] =
+ fdir_input->flow.mpls6udp4_flow.mpls.label_tc_s[1];
+ mpls->label_tc_s[2] =
+ fdir_input->flow.mpls6udp4_flow.mpls.label_tc_s[2];
+ }
+ len += sizeof(struct rte_flow_item_mpls);
+ }
+ return len;
+}
+
+static inline int
i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
const struct i40e_fdir_input *fdir_input,
unsigned char *raw_pkt,
- bool vlan)
+ bool vlan, int outer_len)
{
struct i40e_customized_pctype *cus_pctype = NULL;
static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
uint16_t *ether_type;
- uint8_t len = 2 * sizeof(struct ether_addr);
+ uint8_t len = 2 * sizeof(struct ether_addr) + outer_len;
struct ipv4_hdr *ip;
struct ipv6_hdr *ip6;
uint8_t pctype = fdir_input->pctype;
@@ -977,7 +1102,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
};
- raw_pkt += 2 * sizeof(struct ether_addr);
+ raw_pkt += 2 * sizeof(struct ether_addr) + outer_len;
if (vlan && fdir_input->flow_ext.vlan_tci) {
rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
rte_memcpy(raw_pkt + sizeof(uint16_t),
@@ -1011,12 +1136,82 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
- /* set len to by default */
- ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
- ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
- fdir_input->flow.ip4_flow.ttl :
+
+ if (outer_len) {//we have a tunneled pkt
+ if (fdir_input->flow_ext.tunnel_val != VXLAN) {
+ if (fdir_input->flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ ip->time_to_live =
+ fdir_input->flow.mplsudp4_flow.inner_udp.ip.ttl ?
+ fdir_input->flow.mplsudp4_flow.inner_udp.ip.ttl :
I40E_FDIR_IP_DEFAULT_TTL;
- ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+ ip->total_length =
+ rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN_INNER);
+ ip->type_of_service =
+ fdir_input->flow.mplsudp4_flow.inner_udp.ip.tos;
+ ip->src_addr =
+ fdir_input->flow.mplsudp4_flow.inner_udp.ip.dst_ip;
+ ip->dst_addr =
+ fdir_input->flow.mplsudp4_flow.inner_udp.ip.src_ip;
+ ip->next_proto_id =
+ fdir_input->flow.mplsudp4_flow.inner_udp.ip.proto;
+ } else {
+ ip->time_to_live =
+ fdir_input->flow.mpls6udp4_flow.inner_udp.ip.ttl ?
+ fdir_input->flow.mpls6udp4_flow.inner_udp.ip.ttl :
+ I40E_FDIR_IP_DEFAULT_TTL;
+ ip->total_length =
+ rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN_INNER);
+ ip->type_of_service =
+ fdir_input->flow.mpls6udp4_flow.inner_udp.ip.tos;
+ ip->src_addr =
+ fdir_input->flow.mpls6udp4_flow.inner_udp.ip.dst_ip;
+ ip->dst_addr =
+ fdir_input->flow.mpls6udp4_flow.inner_udp.ip.src_ip;
+ ip->next_proto_id =
+ fdir_input->flow.mpls6udp4_flow.inner_udp.ip.proto;
+ }
+ } else {
+ if (fdir_input->flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ ip->time_to_live =
+ fdir_input->flow.vxlanudp4_flow.inner_udp.ip.ttl ?
+ fdir_input->flow.vxlanudp4_flow.inner_udp.ip.ttl :
+ I40E_FDIR_IP_DEFAULT_TTL;
+ ip->total_length =
+ rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN_INNER);
+ ip->type_of_service =
+ fdir_input->flow.vxlanudp4_flow.inner_udp.ip.tos;
+ ip->src_addr =
+ fdir_input->flow.vxlanudp4_flow.inner_udp.ip.dst_ip;
+ ip->dst_addr =
+ fdir_input->flow.vxlanudp4_flow.inner_udp.ip.src_ip;
+ ip->next_proto_id =
+ fdir_input->flow.vxlanudp4_flow.inner_udp.ip.proto;
+ } else {
+ ip->time_to_live =
+ fdir_input->flow.vxlan6udp4_flow.inner_udp.ip.ttl ?
+ fdir_input->flow.vxlan6udp4_flow.inner_udp.ip.ttl :
+ I40E_FDIR_IP_DEFAULT_TTL;
+ ip->total_length =
+ rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN_INNER);
+ ip->type_of_service =
+ fdir_input->flow.vxlan6udp4_flow.inner_udp.ip.tos;
+ ip->src_addr =
+ fdir_input->flow.vxlan6udp4_flow.inner_udp.ip.dst_ip;
+ ip->dst_addr =
+ fdir_input->flow.vxlan6udp4_flow.inner_udp.ip.src_ip;
+ ip->next_proto_id =
+ fdir_input->flow.vxlan6udp4_flow.inner_udp.ip.proto;
+ }
+ }
+ len += sizeof(struct ipv4_hdr);
+ } else {
+ /* set len to by default */
+ ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+ ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+ fdir_input->flow.ip4_flow.ttl : I40E_FDIR_IP_DEFAULT_TTL;
+ ip->type_of_service = fdir_input->flow.ip4_flow.tos;
/**
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
@@ -1033,8 +1228,10 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
cus_pctype->index == I40E_CUSTOMIZED_GTPU)
+
ip->next_proto_id = IPPROTO_UDP;
len += sizeof(struct ipv4_hdr);
+ }
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
@@ -1043,30 +1240,75 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
ip6 = (struct ipv6_hdr *)raw_pkt;
*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
- ip6->vtc_flow =
- rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
- (fdir_input->flow.ipv6_flow.tc <<
- I40E_FDIR_IPv6_TC_OFFSET));
- ip6->payload_len =
+
+ if (outer_len) {
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW);
+ ip6->payload_len =
rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
- ip6->proto = fdir_input->flow.ipv6_flow.proto ?
- fdir_input->flow.ipv6_flow.proto :
- next_proto[fdir_input->pctype];
- ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
- fdir_input->flow.ipv6_flow.hop_limits :
+ ip6->hop_limits =
I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+ if (fdir_input->flow_ext.tunnel_val != VXLAN) {
+ if (fdir_input->flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ ip6->proto =
+ fdir_input->flow.mplsudp6_flow.inner6_udp.ip.proto;
+ rte_memcpy(&ip6->src_addr,
+ &fdir_input->flow.mplsudp6_flow.inner6_udp.ip.dst_ip, IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr,
+ &fdir_input->flow.mplsudp6_flow.inner6_udp.ip.src_ip, IPV6_ADDR_LEN);
+ } else {
+ ip6->proto =
+ fdir_input->flow.mpls6udp6_flow.inner6_udp.ip.proto;
+ rte_memcpy(&ip6->src_addr,
+ &fdir_input->flow.mpls6udp6_flow.inner6_udp.ip.dst_ip, IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr,
+ &fdir_input->flow.mpls6udp6_flow.inner6_udp.ip.src_ip, IPV6_ADDR_LEN);
+ }
+ } else {
+ if (fdir_input->flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ ip6->proto =
+ fdir_input->flow.vxlanudp6_flow.inner6_udp.ip.proto;
+ rte_memcpy(&ip6->src_addr,
+ &fdir_input->flow.vxlanudp6_flow.inner6_udp.ip.dst_ip, IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr,
+ &fdir_input->flow.vxlanudp6_flow.inner6_udp.ip.src_ip, IPV6_ADDR_LEN);
+ } else {
+ ip6->proto =
+ fdir_input->flow.vxlan6udp6_flow.inner6_udp.ip.proto;
+ rte_memcpy(&ip6->src_addr,
+ &fdir_input->flow.vxlan6udp6_flow.inner6_udp.ip.dst_ip, IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr,
+ &fdir_input->flow.vxlan6udp6_flow.inner6_udp.ip.src_ip, IPV6_ADDR_LEN);
+ }
+ }
+ len += sizeof(struct ipv6_hdr);
+ } else {
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+ (fdir_input->flow.ipv6_flow.tc <<
+ I40E_FDIR_IPv6_TC_OFFSET));
+ ip6->payload_len =
+ rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+ fdir_input->flow.ipv6_flow.proto :
+ next_proto[fdir_input->pctype];
+ ip6->hop_limits =
+ fdir_input->flow.ipv6_flow.hop_limits ?
+ fdir_input->flow.ipv6_flow.hop_limits :
+ I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
/**
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
* to the expected received packets.
*/
- rte_memcpy(&ip6->src_addr,
- &fdir_input->flow.ipv6_flow.dst_ip,
- IPV6_ADDR_LEN);
- rte_memcpy(&ip6->dst_addr,
- &fdir_input->flow.ipv6_flow.src_ip,
- IPV6_ADDR_LEN);
- len += sizeof(struct ipv6_hdr);
+ rte_memcpy(&ip6->src_addr,
+ &fdir_input->flow.ipv6_flow.dst_ip, IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr,
+ &fdir_input->flow.ipv6_flow.src_ip, IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+ }
} else {
PMD_DRV_LOG(ERR, "unknown pctype %u.",
fdir_input->pctype);
@@ -1097,7 +1339,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
struct ipv6_hdr *gtp_ipv6;
uint8_t size, dst = 0;
uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
- int len;
+ int len, outer_len = 0;
uint8_t pctype = fdir_input->pctype;
struct i40e_customized_pctype *cus_pctype;
@@ -1108,9 +1350,14 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
return 0;
}
+ //add outer header info to raw pkt, if tunnel flow
+ if (fdir_input->flow_ext.tunnel)
+ outer_len = i40e_flow_fdir_fill_outer_header(fdir_input, raw_pkt);
+
/* fill the ethernet and IP head */
len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
- !!fdir_input->flow_ext.vlan_tci);
+ !!fdir_input->flow_ext.vlan_tci,
+ outer_len);
if (len < 0)
return -EINVAL;
@@ -1123,9 +1370,46 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
* need to be presented in a reversed order with respect
* to the expected received packets.
*/
- udp->src_port = fdir_input->flow.udp4_flow.dst_port;
- udp->dst_port = fdir_input->flow.udp4_flow.src_port;
- udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+ if (fdir_input->flow_ext.tunnel_val != VXLAN) {
+ if (fdir_input->flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ udp->src_port =
+ fdir_input->flow.mplsudp4_flow.inner_udp.dst_port;
+ udp->dst_port =
+ fdir_input->flow.mplsudp4_flow.inner_udp.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN_INNER);
+ } else {
+ udp->src_port =
+ fdir_input->flow.mpls6udp4_flow.inner_udp.dst_port;
+ udp->dst_port =
+ fdir_input->flow.mpls6udp4_flow.inner_udp.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN_INNER);
+ }
+ } else if (fdir_input->flow_ext.tunnel_val == VXLAN) {
+ if (fdir_input->flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ udp->src_port =
+ fdir_input->flow.vxlanudp4_flow.inner_udp.dst_port;
+ udp->dst_port =
+ fdir_input->flow.vxlanudp4_flow.inner_udp.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN_INNER);
+ } else {
+ udp->src_port =
+ fdir_input->flow.vxlan6udp4_flow.inner_udp.dst_port;
+ udp->dst_port =
+ fdir_input->flow.vxlan6udp4_flow.inner_udp.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN_INNER);
+ }
+ } else {
+ udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+ }
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
tcp = (struct tcp_hdr *)(raw_pkt + len);
payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
@@ -1160,9 +1444,46 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
* need to be presented in a reversed order with respect
* to the expected received packets.
*/
- udp->src_port = fdir_input->flow.udp6_flow.dst_port;
- udp->dst_port = fdir_input->flow.udp6_flow.src_port;
- udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ if (fdir_input->flow_ext.tunnel_val != VXLAN) {
+ if (fdir_input->flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ udp->src_port =
+ fdir_input->flow.mplsudp6_flow.inner6_udp.dst_port;
+ udp->dst_port =
+ fdir_input->flow.mplsudp6_flow.inner6_udp.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN_INNER);
+ } else {
+ udp->src_port =
+ fdir_input->flow.mpls6udp6_flow.inner6_udp.dst_port;
+ udp->dst_port =
+ fdir_input->flow.mpls6udp6_flow.inner6_udp.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN_INNER);
+ }
+ } else if (fdir_input->flow_ext.tunnel_val == VXLAN) {
+ if (fdir_input->flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ udp->src_port =
+ fdir_input->flow.vxlanudp6_flow.inner6_udp.dst_port;
+ udp->dst_port =
+ fdir_input->flow.vxlanudp6_flow.inner6_udp.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN_INNER);
+ } else {
+ udp->src_port =
+ fdir_input->flow.vxlan6udp6_flow.inner6_udp.dst_port;
+ udp->dst_port =
+ fdir_input->flow.vxlan6udp6_flow.inner6_udp.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN_INNER);
+ }
+ } else {
+ udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ }
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
tcp = (struct tcp_hdr *)(raw_pkt + len);
payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
@@ -1265,6 +1586,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
payload = (unsigned char *)gtp +
sizeof(struct rte_flow_item_gtp);
}
+
} else {
PMD_DRV_LOG(ERR, "unknown pctype %u.",
fdir_input->pctype);
@@ -1564,7 +1886,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype = filter->input.pctype;
}
- /* Check if there is the filter in SW list */
+ /* Check if there is the filter in SW list */
memset(&check_filter, 0, sizeof(check_filter));
i40e_fdir_filter_convert(filter, &check_filter);
node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 3694df2..5b979ca 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -80,6 +80,12 @@ static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_fdir_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -136,6 +142,8 @@ const struct rte_flow_ops i40e_flow_ops = {
static union i40e_filter_t cons_filter;
static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+#define TUNNEL_FDIR_FUNCTION i40e_flow_parse_fdir_tunnel_filter
+
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_ETH,
@@ -1543,6 +1551,50 @@ static enum rte_flow_item_type pattern_vxlan_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+static enum rte_flow_item_type pattern_vxlan_5[] = { //IPv4+IPv4
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_6[] = { //IPv4+IPv6
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_7[] = { //IPv6+IPv4
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_8[] = { //IPv6+IPv6
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_nvgre_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
@@ -1609,6 +1661,96 @@ static enum rte_flow_item_type pattern_mpls_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+//adding only for IPv4. adding MPLS flow match for inner packet
+
+static enum rte_flow_item_type pattern_mpls_5[] = { //IPv4+IPv4
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_6[] = { //IPv4 +IPv6
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_7[] = { //IPv6+IPv4
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_8[] = { //IPv6+IPv6
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_9[] = { //IPv4+IPv4
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_10[] = { //IPv4+IPv6
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_11[] = { //IPv6+IPv4
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_12[] = { //IPv6+IPv6
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_qinq_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
@@ -1779,6 +1921,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /*VXLAN with inner header */
+ { pattern_vxlan_5, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_vxlan_6, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_vxlan_7, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_vxlan_8, i40e_flow_parse_fdir_tunnel_filter },
/* NVGRE */
{ pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
{ pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
@@ -1789,6 +1936,15 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+ /* MPLS patterns with inner header */
+ { pattern_mpls_5, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_mpls_6, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_mpls_7, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_mpls_8, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_mpls_9, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_mpls_10, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_mpls_11, i40e_flow_parse_fdir_tunnel_filter },
+ { pattern_mpls_12, i40e_flow_parse_fdir_tunnel_filter },
/* GTP-C & GTP-U */
{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
@@ -2408,6 +2564,558 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
return I40E_FILTER_PCTYPE_INVALID;
}
+
+static int
+i40e_flow_parse_fdir_tunnel_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_fdir_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_mpls *mpls_spec, *mpls_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_gre *gre_spec, *gre_mask;
+ uint8_t pctype = 0;
+ uint64_t input_set = I40E_INSET_NONE;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ uint16_t frag_off;
+ bool outer_ip = true;
+ bool outer_udp = true;
+ int ret;
+ uint8_t ipv6_addr_mask[16] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
+
+ filter->input.flow_ext.customized_pctype = false;
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ if (eth_spec && eth_mask) {
+ if (!is_zero_ether_addr(ð_mask->src) ||
+ !is_zero_ether_addr(ð_mask->dst)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask.");
+ return -rte_errno;
+ }
+ }
+ pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ if (ipv4_spec && ipv4_mask && outer_ip) {
+ /* Check IPv4 mask and update input set */
+ //for outer, we can take only dst IP
+
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.src_addr ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_TUNNEL_IPV4_DST;
+
+ filter->input.flow.mplsudp4_flow.outer_udp.ip.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.flow_ext.iip_type =
+ I40E_FDIR_IPTYPE_IPV4;
+ } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+ //no spec and mask for Inner IP
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner IPv4 mask.");
+ return -rte_errno;
+
+ } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+ //spec and mask for inner IP, set pctype and input_set
+ pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Check if it is fragment. */
+ frag_off = ipv4_spec->hdr.fragment_offset;
+ frag_off = rte_be_to_cpu_16(frag_off);
+ if (frag_off & IPV4_HDR_OFFSET_MASK ||
+ frag_off & IPV4_HDR_MF_FLAG)
+ pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
+
+ if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4 &&
+ filter->input.flow_ext.tunnel_val != VXLAN) {
+ //MPLS with outerIPv4
+ filter->input.flow.mplsudp4_flow.inner_udp.ip.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.flow.mplsudp4_flow.inner_udp.ip.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.mplsudp4_flow.inner_udp.ip.proto =
+ ipv4_spec->hdr.next_proto_id;
+ } else if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV6 &&
+ filter->input.flow_ext.tunnel_val != VXLAN) {
+ // MPLS with outer IPv6
+ filter->input.flow.mpls6udp4_flow.inner_udp.ip.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.flow.mpls6udp4_flow.inner_udp.ip.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.mpls6udp4_flow.inner_udp.ip.proto =
+ ipv4_spec->hdr.next_proto_id;
+ } else if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4 &&
+ filter->input.flow_ext.tunnel_val == VXLAN) {
+ //VXLAN with outer IPv4
+ filter->input.flow.vxlanudp4_flow.inner_udp.ip.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.flow.vxlanudp4_flow.inner_udp.ip.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.vxlanudp4_flow.inner_udp.ip.proto =
+ ipv4_spec->hdr.next_proto_id;
+ } else {
+ // VXLAN with outer IPv6
+ filter->input.flow.vxlan6udp4_flow.inner_udp.ip.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.flow.vxlan6udp4_flow.inner_udp.ip.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.vxlan6udp4_flow.inner_udp.ip.proto =
+ ipv4_spec->hdr.next_proto_id;
+ }
+ }
+
+ if (outer_ip)
+ outer_ip = false;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+
+ if (ipv6_spec && ipv6_mask && outer_ip) {
+ /* Check IPv6 mask and update input set, we take only dst IP */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ if (!memcmp(ipv6_mask->hdr.dst_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ input_set |= I40E_INSET_TUNNEL_IPV6_DST;
+
+ /* Get filter info */
+
+ filter->input.flow.mpls6udp4_flow.outer6_udp.ip.proto =
+ ipv6_spec->hdr.proto;
+ rte_memcpy(filter->input.flow.mpls6udp4_flow.outer6_udp.ip.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ filter->input.flow_ext.iip_type =
+ I40E_FDIR_IPTYPE_IPV6;
+
+ } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+ //no spec,mask for inner IPv6
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner IPv6 mask.");
+ return -rte_errno;
+ } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+ //spec,mask mentioned for inner IPv6
+ pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ if (!memcmp(ipv6_mask->hdr.src_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr)))
+ input_set |= I40E_INSET_IPV6_SRC;
+ if (!memcmp(ipv6_mask->hdr.dst_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4 &&
+ filter->input.flow_ext.tunnel_val != VXLAN) {
+ filter->input.flow.mplsudp6_flow.inner6_udp.ip.proto =
+ ipv6_spec->hdr.proto;
+ rte_memcpy(filter->input.flow.mplsudp6_flow.inner6_udp.ip.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.mplsudp6_flow.inner6_udp.ip.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ } else if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV6 &&
+ filter->input.flow_ext.tunnel_val != VXLAN) {
+ filter->input.flow.mpls6udp6_flow.inner6_udp.ip.proto =
+ ipv6_spec->hdr.proto;
+ rte_memcpy(filter->input.flow.mpls6udp6_flow.inner6_udp.ip.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.mpls6udp6_flow.inner6_udp.ip.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ } else if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4 &&
+ filter->input.flow_ext.tunnel_val == VXLAN) {
+ filter->input.flow.vxlanudp6_flow.inner6_udp.ip.proto =
+ ipv6_spec->hdr.proto;
+ rte_memcpy(filter->input.flow.vxlanudp6_flow.inner6_udp.ip.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.vxlanudp6_flow.inner6_udp.ip.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ } else {
+ filter->input.flow.vxlan6udp6_flow.inner6_udp.ip.proto =
+ ipv6_spec->hdr.proto;
+ rte_memcpy(filter->input.flow.vxlan6udp6_flow.inner6_udp.ip.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.vxlan6udp6_flow.inner6_udp.ip.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto ==
+ I40E_IPV6_FRAG_HEADER)
+ pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ }
+
+ if (outer_ip)
+ outer_ip = false;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ gre_spec = item->spec;
+ gre_mask = item->mask;
+ if (gre_spec && gre_mask) {
+ if (gre_mask->c_rsvd0_ver) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GRE mask");
+ return -rte_errno;
+ }
+
+ if (gre_mask->protocol == UINT16_MAX)
+ input_set |=
+ I40E_INSET_TUNNEL_DST_PORT;
+
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.mplsgre4_flow.outer_gre.gre.protocol =
+ gre_spec->protocol;
+ } else {
+ filter->input.flow.mpls6gre4_flow.outer6_gre.gre.protocol =
+ gre_spec->protocol;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GRE mask");
+ return -rte_errno;
+ }
+ outer_udp = false;
+ filter->input.flow_ext.tunnel_val = MPLSOGRE;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (udp_spec && udp_mask && outer_udp) {
+ /* Check UDP mask and update input set */
+ //for outer, we can take only dst port
+
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.src_port ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+ if (udp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_TUNNEL_DST_PORT;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ //outer UDP for outer IPv4
+ filter->input.flow.mplsudp4_flow.outer_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ } else {
+ filter->input.flow.mpls6udp4_flow.outer6_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ } else if (!udp_spec && !udp_mask && !outer_udp) {
+ //no spec and mask for Inner UDP
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner IPv4 mask.");
+ return -rte_errno;
+
+ } else if ((udp_spec || udp_mask) && !outer_udp) {
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ /* Check UDP mask and update input set */
+ //for inner, we can take dst port and src port
+
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+ if (udp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (udp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+ if (filter->input.flow_ext.tunnel_val !=
+ VXLAN) {
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {//both ipv4
+ filter->input.flow.mplsudp4_flow.inner_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.flow.mplsudp4_flow.inner_udp.src_port =
+ udp_spec->hdr.src_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV6) {
+ filter->input.flow.mpls6udp4_flow.inner_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.flow.mpls6udp4_flow.inner_udp.src_port =
+ udp_spec->hdr.src_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
+ filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ filter->input.flow.mplsudp6_flow.inner6_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.flow.mplsudp6_flow.inner6_udp.src_port =
+ udp_spec->hdr.src_port;
+ } else {//both IPv6
+ filter->input.flow.mpls6udp6_flow.inner6_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.flow.mpls6udp6_flow.inner6_udp.src_port =
+ udp_spec->hdr.src_port;
+ }
+ } else {
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {//both v4
+ filter->input.flow.vxlanudp4_flow.inner_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.flow.vxlanudp4_flow.inner_udp.src_port =
+ udp_spec->hdr.src_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV6) {//innerv4,outerv6
+ filter->input.flow.vxlan6udp4_flow.inner_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.flow.vxlan6udp4_flow.inner_udp.src_port =
+ udp_spec->hdr.src_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
+ filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {//outerv4,innerv6
+ filter->input.flow.vxlanudp6_flow.inner6_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.flow.vxlanudp6_flow.inner6_udp.src_port =
+ udp_spec->hdr.src_port;
+ } else { //both IPv6
+ filter->input.flow.vxlan6udp6_flow.inner6_udp.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.flow.vxlan6udp6_flow.inner6_udp.src_port =
+ udp_spec->hdr.src_port;
+ }
+ }
+ }
+ if (outer_udp)
+ outer_udp = false;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ mpls_spec = item->spec;
+ mpls_mask = item->mask;
+ if (mpls_spec && mpls_mask) {
+ if (memcmp(mpls_mask->label_tc_s,
+ label_mask, 3)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MPLS label mask");
+ return -rte_errno;
+ } else {
+ input_set |= I40E_INSET_TUNNEL_ID;
+ }
+ if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ filter->input.flow.mplsudp4_flow.mpls.label_tc_s[0] =
+ mpls_spec->label_tc_s[0];
+ filter->input.flow.mplsudp4_flow.mpls.label_tc_s[1] =
+ mpls_spec->label_tc_s[1];
+ filter->input.flow.mplsudp4_flow.mpls.label_tc_s[2] =
+ mpls_spec->label_tc_s[2];
+
+ } else {
+ filter->input.flow.mpls6udp4_flow.mpls.label_tc_s[0] =
+ mpls_spec->label_tc_s[0];
+ filter->input.flow.mpls6udp4_flow.mpls.label_tc_s[1] =
+ mpls_spec->label_tc_s[1];
+ filter->input.flow.mpls6udp4_flow.mpls.label_tc_s[2] =
+ mpls_spec->label_tc_s[2];
+ }
+ }
+ filter->input.flow_ext.tunnel = true;
+ if (filter->input.flow_ext.tunnel_val !=
+ MPLSOGRE) //it has not been set by GRE header
+ filter->input.flow_ext.tunnel_val = MPLSOUDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_spec && vxlan_mask) {
+ if (memcmp(vxlan_mask->vni, vni_mask, 3)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN mask");
+ return -rte_errno;
+ } else {
+ input_set |= I40E_INSET_TUNNEL_ID;
+ }
+
+ if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4) {
+ filter->input.flow.vxlanudp4_flow.vxlan.vni[0] =
+ vxlan_spec->vni[0];
+ filter->input.flow.vxlanudp4_flow.vxlan.vni[1] =
+ vxlan_spec->vni[1];
+ filter->input.flow.vxlanudp4_flow.vxlan.vni[2] =
+ vxlan_spec->vni[2];
+ } else {
+ filter->input.flow.vxlan6udp4_flow.vxlan.vni[0] =
+ vxlan_spec->vni[0];
+ filter->input.flow.vxlan6udp4_flow.vxlan.vni[1] =
+ vxlan_spec->vni[1];
+ filter->input.flow.vxlan6udp4_flow.vxlan.vni[2] =
+ vxlan_spec->vni[2];
+ }
+ }
+ filter->input.flow_ext.tunnel = true;
+ filter->input.flow_ext.tunnel_val = VXLAN;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* If customized pctype is not used, set fdir configuration.*/
+ if (!filter->input.flow_ext.customized_pctype) {
+ ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Conflict with the first rule's input set.");
+ return -rte_errno;
+ } else if (ret == -EINVAL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid pattern mask.");
+ return -rte_errno;
+ }
+ }
+ filter->input.pctype = pctype;
+ return 0;
+}
+
/* 1. Last in item should be NULL as range is not supported.
* 2. Supported patterns: refer to array i40e_supported_patterns.
* 3. Default supported flow type and input set: refer to array
@@ -3011,7 +3719,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
"Invalid pattern mask.");
return -rte_errno;
}
-
+ filter->input.flow_ext.tunnel = false;
/* Store flex mask to SW */
ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
if (ret == -1) {
@@ -3079,6 +3787,9 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_PASSTHRU:
filter->action.behavior = I40E_FDIR_PASSTHRU;
break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ filter->action.behavior = I40E_FDIR_PASSTHRU;
+ break;
default:
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
@@ -3120,6 +3831,57 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
}
static int
+i40e_flow_parse_fdir_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_fdir_filter_conf *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_flow_parse_fdir_tunnel_pattern(dev, pattern, error,
+ fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ ret = i40e_fdir_setup(pf);
+ if (ret != I40E_SUCCESS) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to setup fdir");
+ return -rte_errno;
+ }
+ ret = i40e_fdir_configure(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to configure fdir");
+ goto err;
+ }
+ dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
+ }
+ return 0;
+
+err:
+ i40e_fdir_teardown(pf);
+ return -rte_errno;
+}
+
+static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -4558,6 +5320,7 @@ i40e_flow_validate(struct rte_eth_dev *dev,
uint32_t item_num = 0; /* non-void item number of pattern*/
uint32_t i = 0;
bool flag = false;
+ bool rss = false;
int ret = I40E_NOT_SUPPORTED;
if (!pattern) {
@@ -4587,9 +5350,7 @@ i40e_flow_validate(struct rte_eth_dev *dev,
i++;
if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
- ret = i40e_parse_rss_filter(dev, attr, pattern,
- actions, &cons_filter, error);
- return ret;
+ rss = true;
}
i = 0;
@@ -4621,9 +5382,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
rte_free(items);
return -rte_errno;
}
- if (parse_filter)
- ret = parse_filter(dev, attr, items, actions,
+ if (parse_filter) {
+ if (parse_filter != TUNNEL_FDIR_FUNCTION &&
+ rss == true) {
+ ret = i40e_parse_rss_filter(dev, attr, pattern,
+ actions, &cons_filter, error);
+ return ret;
+ } else {
+ ret = parse_filter(dev, attr, items, actions,
error, &cons_filter);
+ }
+ }
flag = true;
} while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
--
2.7.4
* Re: [dpdk-dev] [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel
2018-12-05 11:13 [dpdk-dev] [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel subarna.kar
@ 2018-12-06 11:44 ` Zhang, Qi Z
2018-12-07 4:52 ` Tu, Lijuan
From: Zhang, Qi Z @ 2018-12-06 11:44 UTC
To: Kar, Subarna, dev; +Cc: Wu, Jingjing, Gasparakis, Joseph
Hi Subarna:
> -----Original Message-----
> From: Kar, Subarna
> Sent: Wednesday, December 5, 2018 7:13 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Kar, Subarna <subarna.kar@intel.com>
> Subject: [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel
The title does not follow the DPDK style; it could be:
net/i40e: match on outer and inner headers for tunnel
Also, please add a detailed commit log for such a large patch.
Btw, the patch is too large; it's better to split it into smaller ones.
What I can suggest so far: separate it into at least two patches, one
for the new feature at the fdir low level and one for bridging rte_flow
to fdir.
Below are some comments on specific hunks.
>
> From: Subarna Kar <subarna.kar@intel.com>
>
> ---
> drivers/net/i40e/i40e_ethdev.c | 14 +-
> drivers/net/i40e/i40e_ethdev.h | 109 +++++-
> drivers/net/i40e/i40e_fdir.c | 392 +++++++++++++++++++--
> drivers/net/i40e/i40e_flow.c | 781
> ++++++++++++++++++++++++++++++++++++++++-
> 4 files changed, 1250 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 7030eb1..0e9f22d 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -1788,6 +1788,7 @@ i40e_dev_configure(struct rte_eth_dev *dev)
> * legacy filter API is deprecated, the following codes should also be
> * removed.
> */
> + dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
This is not correct; it will prevent the vector PMD (vPMD) from being
used in all cases. I know the vPMD should not be used when the new flow
is enabled, and the patch below has an issue that will break this, but
we should fix that in that patch anyway.
https://patches.dpdk.org/patch/45697/
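Something like below (just a sketch of the idea, untested, error paths
simplified): keep i40e_dev_configure() untouched and switch to perfect
mode lazily, only when the first tunnel flow is actually requested. Your
i40e_flow_parse_fdir_tunnel_filter() further down already contains this
logic, which makes the forced assignment here look redundant:

	/* in the tunnel flow parse path only, not in i40e_dev_configure() */
	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		if (i40e_fdir_setup(pf) != I40E_SUCCESS)
			return -rte_errno;
		if (i40e_fdir_configure(dev) < 0) {
			i40e_fdir_teardown(pf);
			return -rte_errno;
		}
		dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
	}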
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index 11ecfc3..f7311ce 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -466,6 +466,92 @@ struct i40e_vmdq_info {
> #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
> #define I40E_FDIR_IPv6_TC_OFFSET 20
>
> static inline int
> +i40e_flow_fdir_fill_outer_header(const struct i40e_fdir_input *fdir_input,
> + unsigned char *raw_pkt)
> +{
> + struct ipv4_hdr *ip;
> + struct ipv6_hdr *ip6;
> + struct udp_hdr *udp;
> + struct rte_flow_item_gre *gre;
> + struct rte_flow_item_mpls *mpls;
> + struct rte_flow_item_vxlan *vxlan;
> + uint16_t *ether_type;
> + uint8_t len = 2 * sizeof(struct ether_addr);
> + raw_pkt += 2 * sizeof(struct ether_addr);
> + ether_type = (uint16_t *)raw_pkt;
> + raw_pkt += sizeof(uint16_t);
> + len += sizeof(uint16_t);
> +
> +
> + //we don't need ETH, so direct skip to IP
It's better to use /* xxx */ comments to follow the coding style.
The same comment applies to all the other places
(except where a nearby comment already breaks the style and you are just following it :))
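For example:

	/* we don't need ETH, so skip directly to IP */

and for multi-line comments:

	/*
	 * now put MPLS info for MPLSoUDP and MPLSoGRE
	 */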
> + vxlan = (struct rte_flow_item_vxlan *)raw_pkt;
> + if (fdir_input->flow_ext.iip_type == I40E_FDIR_IPTYPE_IPV4) {
> + vxlan->vni[0] =
> + fdir_input->flow.vxlanudp4_flow.vxlan.vni[0];
> + vxlan->vni[1] =
> + fdir_input->flow.vxlanudp4_flow.vxlan.vni[1];
> + vxlan->vni[2] =
> + fdir_input->flow.vxlanudp4_flow.vxlan.vni[2];
> + } else {
> + vxlan->vni[0] =
> + fdir_input->flow.vxlan6udp4_flow.vxlan.vni[0];
> + vxlan->vni[1] =
> + fdir_input->flow.vxlan6udp4_flow.vxlan.vni[1];
> + vxlan->vni[2] =
> + fdir_input->flow.vxlan6udp4_flow.vxlan.vni[2];
> + }
> + vxlan->flags = VXLAN_DEFAULT_FLAG;
> + len += sizeof(struct rte_flow_item_vxlan);
> + } else {
> + //now put MPLS info for MPLSoUDP and MPLSoGRE
The indentation is not correct; replace the spaces with tabs.
> +
> +static int
> +i40e_flow_parse_fdir_tunnel_pattern(struct rte_eth_dev *dev,
> + const struct rte_flow_item *pattern,
> + struct rte_flow_error *error,
> + struct i40e_fdir_filter_conf *filter)
There are a couple of indentation issues in this function; please double-check.
>
> i = 0;
> @@ -4621,9 +5382,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
> rte_free(items);
> return -rte_errno;
> }
> - if (parse_filter)
> - ret = parse_filter(dev, attr, items, actions,
> + if (parse_filter) {
> + if (parse_filter != TUNNEL_FDIR_FUNCTION &&
> + rss == true) {
s/rss == true/rss
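A sketch of the corrected branch (same logic as the hunk above, with the boolean comparison dropped and the now-redundant else removed; TUNNEL_FDIR_FUNCTION and i40e_parse_rss_filter() are taken from the patch as-is):

	if (parse_filter) {
		if (parse_filter != TUNNEL_FDIR_FUNCTION && rss) {
			ret = i40e_parse_rss_filter(dev, attr, pattern,
						    actions, &cons_filter,
						    error);
			return ret;
		}
		ret = parse_filter(dev, attr, items, actions,
				   error, &cons_filter);
	}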
> + ret = i40e_parse_rss_filter(dev, attr, pattern,
> + actions, &cons_filter, error);
> + return ret;
> + } else {
> + ret = parse_filter(dev, attr, items, actions,
> error, &cons_filter);
> + }
> + }
> flag = true;
> } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
>
> --
> 2.7.4
* Re: [dpdk-dev] [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel
2018-12-06 11:44 ` Zhang, Qi Z
@ 2018-12-07 4:52 ` Tu, Lijuan
2018-12-07 4:58 ` Kar, Subarna
0 siblings, 1 reply; 6+ messages in thread
From: Tu, Lijuan @ 2018-12-07 4:52 UTC (permalink / raw)
To: Zhang, Qi Z, Kar, Subarna, dev, Tu, Lijuan
Cc: Wu, Jingjing, Gasparakis, Joseph
Hi Subarna,
This patch causes a big performance drop, ~30%, in the single-core performance test case.
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zhang, Qi Z
> Sent: Thursday, December 6, 2018 7:45 PM
> To: Kar, Subarna <subarna.kar@intel.com>; dev@dpdk.org
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Gasparakis, Joseph
> <joseph.gasparakis@intel.com>
> Subject: Re: [dpdk-dev] [PATCH]
> net-i40e-Match-on-outer-and-inner-headers-for-tunnel
>
> [snip]
* Re: [dpdk-dev] [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel
2018-12-07 4:52 ` Tu, Lijuan
@ 2018-12-07 4:58 ` Kar, Subarna
2018-12-07 5:05 ` Tu, Lijuan
2018-12-07 5:09 ` Zhang, Qi Z
0 siblings, 2 replies; 6+ messages in thread
From: Kar, Subarna @ 2018-12-07 4:58 UTC (permalink / raw)
To: Tu, Lijuan, Zhang, Qi Z, dev; +Cc: Wu, Jingjing, Gasparakis, Joseph
Can I ask what kind of packets you are sending? Your packets shouldn't hit my code: this change only adds flow matching for tunneled packets on the outer and inner headers.
-----Original Message-----
From: Tu, Lijuan
Sent: Thursday, December 6, 2018 8:52 PM
To: Zhang, Qi Z <qi.z.zhang@intel.com>; Kar, Subarna <subarna.kar@intel.com>; dev@dpdk.org; Tu, Lijuan <lijuan.tu@intel.com>
Cc: Wu, Jingjing <jingjing.wu@intel.com>; Gasparakis, Joseph <joseph.gasparakis@intel.com>
Subject: RE: [dpdk-dev] [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel
Hi Subarna,
This patch causes a big performance drop, ~30%, in the single-core performance test case.
[snip]
* Re: [dpdk-dev] [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel
2018-12-07 4:58 ` Kar, Subarna
@ 2018-12-07 5:05 ` Tu, Lijuan
2018-12-07 5:09 ` Zhang, Qi Z
1 sibling, 0 replies; 6+ messages in thread
From: Tu, Lijuan @ 2018-12-07 5:05 UTC (permalink / raw)
To: Kar, Subarna, Zhang, Qi Z, dev; +Cc: Wu, Jingjing, Gasparakis, Joseph
General IPv4 packets; a sample scapy packet looks like this: Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/("X"*26)
> -----Original Message-----
> From: Kar, Subarna
> Sent: Friday, December 7, 2018 12:58 PM
> To: Tu, Lijuan <lijuan.tu@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
> dev@dpdk.org
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Gasparakis, Joseph
> <joseph.gasparakis@intel.com>
> Subject: RE: [dpdk-dev] [PATCH]
> net-i40e-Match-on-outer-and-inner-headers-for-tunnel
>
> Can I ask what kind of packets you are sending? Your packets shouldn't hit
> my code: this change only adds flow matching for tunneled packets on the
> outer and inner headers.
>
> [snip]
* Re: [dpdk-dev] [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel
2018-12-07 4:58 ` Kar, Subarna
2018-12-07 5:05 ` Tu, Lijuan
@ 2018-12-07 5:09 ` Zhang, Qi Z
1 sibling, 0 replies; 6+ messages in thread
From: Zhang, Qi Z @ 2018-12-07 5:09 UTC (permalink / raw)
To: Kar, Subarna, Tu, Lijuan, dev; +Cc: Wu, Jingjing, Gasparakis, Joseph
The reason is that the code below prevents the vPMD from being used; I commented on this below.
> > > + dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
> >
> > This is not correct: it will prevent the vPMD from being used in all cases.
> > I know the vPMD should not be used when the new flow feature is enabled,
> > and the patch below has an issue that breaks this, but we should fix that
> > in that patch anyway.
> > https://patches.dpdk.org/patch/45697/
> -----Original Message-----
> From: Kar, Subarna
> Sent: Friday, December 7, 2018 12:58 PM
> To: Tu, Lijuan <lijuan.tu@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
> dev@dpdk.org
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Gasparakis, Joseph
> <joseph.gasparakis@intel.com>
> Subject: RE: [dpdk-dev] [PATCH]
> net-i40e-Match-on-outer-and-inner-headers-for-tunnel
>
> Can I ask what kind of packets you are sending? Your packets shouldn't hit
> my code: this change only adds flow matching for tunneled packets on the
> outer and inner headers.
>
> [snip]
end of thread
Thread overview: 6+ messages
2018-12-05 11:13 [dpdk-dev] [PATCH] net-i40e-Match-on-outer-and-inner-headers-for-tunnel subarna.kar
2018-12-06 11:44 ` Zhang, Qi Z
2018-12-07 4:52 ` Tu, Lijuan
2018-12-07 4:58 ` Kar, Subarna
2018-12-07 5:05 ` Tu, Lijuan
2018-12-07 5:09 ` Zhang, Qi Z