* [RFC PATCH v3] net/iavf: support rte flow with mask for FDIR
From: Ian Stokes @ 2024-03-25 17:01 UTC
To: dev; +Cc: Ananth S, Zhichao Zeng
From: Ananth S <ananth.s@intel.com>
This patch adds support for rte_flow rules with masks for FDIR,
covering eth/ipv4/ipv6/tcp/udp flow items; masked src/dst addresses
for ipv4/ipv6 and masked src/dst ports for tcp/udp are realized
through the switch filter.
This patch additionally contains fixes for the issues identified
in the [21.11.5 v2] patch.
This patch is based on DPDK v21.11.5 LTS
[4e50ad4469f7c037e32de5aa3535d1cd25de0741], for customer cherry-pick.
Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
Signed-off-by: Ananth S <ananth.s@intel.com>
---
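Note for reviewers: a minimal usage sketch of the kind of rule this
enables (illustrative only; the port number, queue index, and addresses
are assumptions, and setup/error handling is trimmed):

	#include <rte_flow.h>
	#include <rte_ip.h>

	/* FDIR rule matching IPv4 src 192.168.0.0/24 via a partial mask;
	 * previously a mask other than UINT32_MAX was rejected, with this
	 * patch the rule is realized through the switch filter path.
	 */
	uint16_t port_id = 0; /* assumed */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 0)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(0xffffff00), /* /24, not full */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &flow_err);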
drivers/common/iavf/virtchnl.h | 40 ++++-
drivers/net/iavf/iavf_ethdev.c | 26 +++
drivers/net/iavf/iavf_fdir.c | 305 ++++++++++++++++++---------------
drivers/net/iavf/iavf_hash.c | 112 ++++++------
drivers/net/ice/ice_ethdev.c | 26 +++
lib/ethdev/rte_ethdev.h | 3 +
6 files changed, 320 insertions(+), 192 deletions(-)
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 80e754a1b2..bc8f355db1 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -1482,6 +1482,8 @@ enum virtchnl_vfr_states {
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK 16
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) \
(proto_hdr_type << PROTO_HDR_SHIFT)
@@ -1669,6 +1671,22 @@ struct virtchnl_proto_hdr {
VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
+struct virtchnl_proto_hdr_w_msk {
+ /* see enum virtchnl_proto_hdr_type */
+ s32 type;
+ u32 pad;
+ /**
+ * binary buffer in network order for specific header type.
+ * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+ * header is expected to be copied into the buffer.
+ */
+ u8 buffer_spec[64];
+ /* binary buffer for bit-mask applied to specific header type */
+ u8 buffer_mask[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
+
struct virtchnl_proto_hdrs {
u8 tunnel_level;
/**
@@ -1678,8 +1696,26 @@ struct virtchnl_proto_hdrs {
* 2 - from the second inner layer
* ....
**/
- int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
- struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ int count;
+ /**
+ * count must be <=
+ * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
+ * count = 0 : select raw
+ * 0 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS : select proto_hdr
+ * count > VIRTCHNL_MAX_NUM_PROTO_HDRS : select proto_hdr_w_msk
+ * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
+ */
+ union {
+ struct virtchnl_proto_hdr
+ proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ struct virtchnl_proto_hdr_w_msk
+ proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
+ struct {
+ u16 pkt_len;
+ u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
};
VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
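For clarity, the receiver-side selection implied by the new count
encoding would look roughly like this (hypothetical helper, not part
of the patch):

	static int
	virtchnl_proto_hdrs_select(const struct virtchnl_proto_hdrs *hdrs)
	{
		if (hdrs->count == 0) {
			/* raw filter: hdrs->raw.pkt_len bytes of
			 * hdrs->raw.spec / hdrs->raw.mask
			 */
		} else if (hdrs->count <= VIRTCHNL_MAX_NUM_PROTO_HDRS) {
			/* hdrs->count entries in hdrs->proto_hdr[] */
		} else {
			int nb = hdrs->count - VIRTCHNL_MAX_NUM_PROTO_HDRS;

			if (nb > VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK)
				return -1; /* malformed message */
			/* nb entries in hdrs->proto_hdr_w_msk[] */
		}
		return 0;
	}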
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index e40af1316d..93d4d03eb9 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -500,6 +500,11 @@ iavf_init_rss(struct iavf_adapter *adapter)
uint16_t i, j, nb_q;
int ret;
+#ifdef RTE_ETHDEV_EXCL_Q
+ uint16_t q_index = 0;
+ uint16_t excl_q = RSS_RETA_EXCL_Q_ID;
+#endif /* RTE_ETHDEV_EXCL_Q */
+
rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
vf->max_rss_qregion);
@@ -525,6 +530,27 @@ iavf_init_rss(struct iavf_adapter *adapter)
j = 0;
vf->rss_lut[i] = j;
}
+
+#ifdef RTE_ETHDEV_EXCL_Q
+ if (nb_q > 1) {
+ if (excl_q > nb_q) {
+ /* if the excl_q index is higher than the max queue, assign the
+ * default RSS LUT
+ */
+ for (i = 0; i < vf->vf_res->rss_lut_size; i++)
+ vf->rss_lut[i] = i % nb_q;
+
+ } else {
+ for (i = 0, q_index = 0; i < vf->vf_res->rss_lut_size; i++, q_index++) {
+ /* Increment q_index to skip excl_q */
+ if (q_index % nb_q == excl_q)
+ q_index++;
+
+ vf->rss_lut[i] = q_index % nb_q;
+ }
+ }
+ }
+#endif /* RTE_ETHDEV_EXCL_Q */
/* send virtchnl ops to configure RSS */
ret = iavf_configure_rss_lut(adapter);
if (ret)
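The exclude-queue LUT fill above is easy to check in isolation; a
standalone sketch with assumed sizes (nb_q = 4, excl_q = 1, a short
LUT) shows queue 1 dropping out of the rotation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int nb_q = 4, excl_q = 1, lut_size = 12;
		unsigned int i, q_index;

		for (i = 0, q_index = 0; i < lut_size; i++, q_index++) {
			if (q_index % nb_q == excl_q)
				q_index++; /* skip the excluded queue */
			printf("%u ", q_index % nb_q);
		}
		printf("\n"); /* prints: 0 2 3 0 2 3 0 2 3 0 2 3 */
		return 0;
	}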
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index c30853dd94..be7c9da053 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -695,6 +695,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
const struct rte_flow_item_gre *gre_spec, *gre_mask;
const struct rte_flow_item *item = pattern;
+ struct virtchnl_proto_hdr_w_msk *hdr_w_msk, *hdr1_w_msk = NULL;
struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
struct rte_ecpri_common_hdr ecpri_common;
uint64_t input_set = IAVF_INSET_NONE;
@@ -702,8 +703,12 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
enum rte_flow_item_type next_type;
uint8_t tun_inner = 0;
uint16_t ether_type;
+ int with_mask = 0;
int layer = 0;
-
+ uint8_t ipv6_zero_mask[16] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
uint8_t ipv6_addr_mask[16] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
@@ -727,8 +732,10 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
next_type = (item + 1)->type;
hdr1 = &hdrs->proto_hdr[layer];
+ hdr1_w_msk = &hdrs->proto_hdr_w_msk[layer];
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1_w_msk, ETH);
if (next_type == RTE_FLOW_ITEM_TYPE_END &&
(!eth_spec || !eth_mask)) {
@@ -739,39 +746,59 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
}
if (eth_spec && eth_mask) {
- if (!rte_is_zero_ether_addr(&eth_mask->src) ||
- !rte_is_zero_ether_addr(&eth_mask->dst)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Invalid MAC_addr mask.");
- return -rte_errno;
- }
- }
-
- if (eth_spec && eth_mask && eth_mask->type) {
- if (eth_mask->type != RTE_BE16(0xffff)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Invalid type mask.");
- return -rte_errno;
- }
-
- ether_type = rte_be_to_cpu_16(eth_spec->type);
- if (ether_type == RTE_ETHER_TYPE_IPV4 ||
- ether_type == RTE_ETHER_TYPE_IPV6) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Unsupported ether_type.");
- return -rte_errno;
+ if ((!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) ||
+ (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr))) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr))
+ input_set |= IAVF_INSET_DMAC;
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr))
+ input_set |= IAVF_INSET_SMAC;
+ if (eth_mask->hdr.ether_type)
+ input_set |= IAVF_INSET_ETHERTYPE;
+ rte_memcpy(hdr1_w_msk->buffer_spec, eth_spec,
+ sizeof(struct rte_ether_hdr));
+ rte_memcpy(hdr1_w_msk->buffer_mask, eth_mask,
+ sizeof(struct rte_ether_hdr));
+ with_mask = 1;
+ } else {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
+ input_set |= IAVF_INSET_DMAC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
+ ETH,
+ DST);
+ } else if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
+ input_set |= IAVF_INSET_SMAC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
+ ETH,
+ SRC);
+ }
+
+ if (eth_spec && eth_mask && eth_mask->type) {
+ if (eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid type mask.");
+ return -rte_errno;
+ }
+
+ ether_type = rte_be_to_cpu_16(eth_spec->type);
+ if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+ ether_type == RTE_ETHER_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type.");
+ return -rte_errno;
+ }
+
+ input_set |= IAVF_INSET_ETHERTYPE;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
+ ETHERTYPE);
+ }
+ rte_memcpy(hdr1->buffer, eth_spec,
+ sizeof(struct rte_ether_hdr));
}
-
- input_set |= IAVF_INSET_ETHERTYPE;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
- ETHERTYPE);
-
- rte_memcpy(hdr1->buffer, eth_spec,
- sizeof(struct rte_ether_hdr));
}
hdrs->count = ++layer;
@@ -785,8 +812,10 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
next_type = (item + 1)->type;
hdr = &hdrs->proto_hdr[layer];
+ hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, IPV4);
if (!(ipv4_spec && ipv4_mask)) {
hdrs->count = ++layer;
@@ -817,43 +846,54 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
return -rte_errno;
}
- /* Mask for IPv4 src/dst addrs not supported */
- if (ipv4_mask->hdr.src_addr &&
- ipv4_mask->hdr.src_addr != UINT32_MAX)
- return -rte_errno;
- if (ipv4_mask->hdr.dst_addr &&
- ipv4_mask->hdr.dst_addr != UINT32_MAX)
- return -rte_errno;
+ if (!with_mask && ipv4_mask->hdr.src_addr == UINT32_MAX) {
+ input_set |= IAVF_INSET_IPV4_SRC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ SRC);
+ } else if (ipv4_mask->hdr.src_addr) {
+ input_set |= IAVF_INSET_IPV4_SRC;
+ with_mask = 1;
+ }
+
+ if (!with_mask && ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ input_set |= IAVF_INSET_IPV4_DST;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ DST);
+ } else if (ipv4_mask->hdr.dst_addr) {
+ input_set |= IAVF_INSET_IPV4_DST;
+ with_mask = 1;
+ }
if (ipv4_mask->hdr.type_of_service ==
UINT8_MAX) {
input_set |= IAVF_INSET_IPV4_TOS;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
- DSCP);
+ if (!with_mask)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ DSCP);
}
if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
input_set |= IAVF_INSET_IPV4_PROTO;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
- PROT);
+ if (!with_mask)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ PROT);
}
if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
input_set |= IAVF_INSET_IPV4_TTL;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
- TTL);
+ if (!with_mask)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ TTL);
}
- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
- input_set |= IAVF_INSET_IPV4_SRC;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
- SRC);
- }
-
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
- input_set |= IAVF_INSET_IPV4_DST;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
- DST);
+ if (with_mask) {
+ rte_memcpy(hdr_w_msk->buffer_spec, &ipv4_spec->hdr,
+ sizeof(ipv4_spec->hdr));
+ rte_memcpy(hdr_w_msk->buffer_mask, &ipv4_mask->hdr,
+ sizeof(ipv4_mask->hdr));
+ } else {
+ rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
+ sizeof(ipv4_spec->hdr));
}
if (tun_inner) {
@@ -861,9 +901,6 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
input_set |= IAVF_PROT_IPV4_INNER;
}
- rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
- sizeof(ipv4_spec->hdr));
-
hdrs->count = ++layer;
/* fragment Ipv4:
@@ -898,8 +935,10 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
ipv6_mask = item->mask;
hdr = &hdrs->proto_hdr[layer];
+ hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, IPV6);
if (!(ipv6_spec && ipv6_mask)) {
hdrs->count = ++layer;
@@ -913,37 +952,59 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
return -rte_errno;
}
+
+ if (!with_mask && !memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr))) {
+ input_set |= IAVF_INSET_IPV6_SRC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ SRC);
+ } else if (memcmp(ipv6_mask->hdr.src_addr, ipv6_zero_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr))) {
+ input_set |= IAVF_INSET_IPV6_SRC;
+ with_mask = 1;
+ }
+
+ if (!with_mask && !memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+ input_set |= IAVF_INSET_IPV6_DST;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ DST);
+ } else if (memcmp(ipv6_mask->hdr.dst_addr, ipv6_zero_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+ input_set |= IAVF_INSET_IPV6_DST;
+ with_mask = 1;
+ }
+
if ((ipv6_mask->hdr.vtc_flow &
- rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
- == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
+ rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
input_set |= IAVF_INSET_IPV6_TC;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
- TC);
+ if (!with_mask)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ TC);
}
if (ipv6_mask->hdr.proto == UINT8_MAX) {
input_set |= IAVF_INSET_IPV6_NEXT_HDR;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
- PROT);
+ if (!with_mask)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ PROT);
}
if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
- HOP_LIMIT);
+ if (!with_mask)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ HOP_LIMIT);
}
-
- if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr))) {
- input_set |= IAVF_INSET_IPV6_SRC;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
- SRC);
- }
- if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr))) {
- input_set |= IAVF_INSET_IPV6_DST;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
- DST);
+ if (with_mask) {
+ rte_memcpy(hdr_w_msk->buffer_spec, &ipv6_spec->hdr,
+ sizeof(ipv6_spec->hdr));
+ rte_memcpy(hdr_w_msk->buffer_mask, &ipv6_mask->hdr,
+ sizeof(ipv6_mask->hdr));
+ } else {
+ rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
+ sizeof(ipv6_spec->hdr));
}
if (tun_inner) {
@@ -951,9 +1012,6 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
input_set |= IAVF_PROT_IPV6_INNER;
}
- rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
- sizeof(ipv6_spec->hdr));
-
hdrs->count = ++layer;
break;
@@ -1002,9 +1060,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
udp_spec = item->spec;
udp_mask = item->mask;
- hdr = &hdrs->proto_hdr[layer];
+ hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
- VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, UDP);
if (udp_spec && udp_mask) {
if (udp_mask->hdr.dgram_len ||
@@ -1015,36 +1073,24 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
return -rte_errno;
}
- /* Mask for UDP src/dst ports not supported */
- if (udp_mask->hdr.src_port &&
- udp_mask->hdr.src_port != UINT16_MAX)
- return -rte_errno;
- if (udp_mask->hdr.dst_port &&
- udp_mask->hdr.dst_port != UINT16_MAX)
- return -rte_errno;
-
- if (udp_mask->hdr.src_port == UINT16_MAX) {
- input_set |= IAVF_INSET_UDP_SRC_PORT;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
- }
- if (udp_mask->hdr.dst_port == UINT16_MAX) {
- input_set |= IAVF_INSET_UDP_DST_PORT;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+ if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+ if (udp_mask->hdr.src_port)
+ input_set |= IAVF_INSET_UDP_SRC_PORT;
+ if (udp_mask->hdr.dst_port)
+ input_set |= IAVF_INSET_UDP_DST_PORT;
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ rte_memcpy(hdr_w_msk->buffer_spec, &udp_spec->hdr,
+ sizeof(udp_spec->hdr));
+ rte_memcpy(hdr_w_msk->buffer_mask, &udp_mask->hdr,
+ sizeof(udp_mask->hdr));
+ with_mask = 1;
}
if (tun_inner) {
input_set &= ~IAVF_PROT_UDP_OUTER;
input_set |= IAVF_PROT_UDP_INNER;
}
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- rte_memcpy(hdr->buffer,
- &udp_spec->hdr,
- sizeof(udp_spec->hdr));
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- rte_memcpy(hdr->buffer,
- &udp_spec->hdr,
- sizeof(udp_spec->hdr));
}
hdrs->count = ++layer;
@@ -1054,9 +1100,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
tcp_spec = item->spec;
tcp_mask = item->mask;
- hdr = &hdrs->proto_hdr[layer];
+ hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
- VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, TCP);
if (tcp_spec && tcp_mask) {
if (tcp_mask->hdr.sent_seq ||
@@ -1072,36 +1118,24 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
return -rte_errno;
}
- /* Mask for TCP src/dst ports not supported */
- if (tcp_mask->hdr.src_port &&
- tcp_mask->hdr.src_port != UINT16_MAX)
- return -rte_errno;
- if (tcp_mask->hdr.dst_port &&
- tcp_mask->hdr.dst_port != UINT16_MAX)
- return -rte_errno;
-
- if (tcp_mask->hdr.src_port == UINT16_MAX) {
- input_set |= IAVF_INSET_TCP_SRC_PORT;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
- }
- if (tcp_mask->hdr.dst_port == UINT16_MAX) {
- input_set |= IAVF_INSET_TCP_DST_PORT;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+ if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+ if (tcp_mask->hdr.src_port)
+ input_set |= IAVF_INSET_TCP_SRC_PORT;
+ if (tcp_mask->hdr.dst_port)
+ input_set |= IAVF_INSET_TCP_DST_PORT;
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ rte_memcpy(hdr_w_msk->buffer_spec, &tcp_spec->hdr,
+ sizeof(tcp_spec->hdr));
+ rte_memcpy(hdr_w_msk->buffer_mask, &tcp_mask->hdr,
+ sizeof(tcp_mask->hdr));
+ with_mask = 1;
}
if (tun_inner) {
input_set &= ~IAVF_PROT_TCP_OUTER;
input_set |= IAVF_PROT_TCP_INNER;
}
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- rte_memcpy(hdr->buffer,
- &tcp_spec->hdr,
- sizeof(tcp_spec->hdr));
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- rte_memcpy(hdr->buffer,
- &tcp_spec->hdr,
- sizeof(tcp_spec->hdr));
}
hdrs->count = ++layer;
@@ -1376,6 +1410,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
}
}
+ if (with_mask)
+ hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
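The final count adjustment above is what ties the parser to the new
virtchnl union: the offset tells the PF which member to read. A worked
example with assumed values (not from the patch):

	/* ETH / IPV4 (src /24) / UDP parses into three layers with at
	 * least one partial mask seen, so with_mask is set.
	 */
	int layer = 3, with_mask = 1;
	int count = layer + (with_mask ? VIRTCHNL_MAX_NUM_PROTO_HDRS : 0);
	/* count == 35 on the wire; the PF recovers 35 - 32 = 3 and reads
	 * proto_hdr_w_msk[0..2] instead of proto_hdr[].
	 */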
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 5e0888ea68..e43ed412bf 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -178,218 +178,218 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
/* proto_hdrs template */
struct virtchnl_proto_hdrs outer_ipv4_tmplt = {
TUNNEL_LEVEL_OUTER, 4,
- {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4}
+ {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4}}
};
struct virtchnl_proto_hdrs outer_ipv4_udp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
- {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+ {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
proto_hdr_ipv4_with_prot,
- proto_hdr_udp}
+ proto_hdr_udp}}
};
struct virtchnl_proto_hdrs outer_ipv4_tcp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
- {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+ {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
proto_hdr_ipv4_with_prot,
- proto_hdr_tcp}
+ proto_hdr_tcp}}
};
struct virtchnl_proto_hdrs outer_ipv4_sctp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
- {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
- proto_hdr_sctp}
+ {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
+ proto_hdr_sctp}}
};
struct virtchnl_proto_hdrs outer_ipv6_tmplt = {
TUNNEL_LEVEL_OUTER, 4,
- {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6}
+ {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6}}
};
struct virtchnl_proto_hdrs outer_ipv6_frag_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
- {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
- proto_hdr_ipv6, proto_hdr_ipv6_frag}
+ {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+ proto_hdr_ipv6, proto_hdr_ipv6_frag}}
};
struct virtchnl_proto_hdrs outer_ipv6_udp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
- {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+ {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
proto_hdr_ipv6_with_prot,
- proto_hdr_udp}
+ proto_hdr_udp}}
};
struct virtchnl_proto_hdrs outer_ipv6_tcp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
- {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+ {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
proto_hdr_ipv6_with_prot,
- proto_hdr_tcp}
+ proto_hdr_tcp}}
};
struct virtchnl_proto_hdrs outer_ipv6_sctp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
- {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
- proto_hdr_sctp}
+ {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
+ proto_hdr_sctp}}
};
struct virtchnl_proto_hdrs inner_ipv4_tmplt = {
- TUNNEL_LEVEL_INNER, 1, {proto_hdr_ipv4}
+ TUNNEL_LEVEL_INNER, 1, {{proto_hdr_ipv4}}
};
struct virtchnl_proto_hdrs inner_ipv4_udp_tmplt = {
- TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_udp}
+ TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_udp}}
};
struct virtchnl_proto_hdrs inner_ipv4_tcp_tmplt = {
- TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_tcp}
+ TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_tcp}}
};
struct virtchnl_proto_hdrs second_inner_ipv4_tmplt = {
- 2, 1, {proto_hdr_ipv4}
+ 2, 1, {{proto_hdr_ipv4}}
};
struct virtchnl_proto_hdrs second_inner_ipv4_udp_tmplt = {
- 2, 2, {proto_hdr_ipv4_with_prot, proto_hdr_udp}
+ 2, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_udp}}
};
struct virtchnl_proto_hdrs second_inner_ipv4_tcp_tmplt = {
- 2, 2, {proto_hdr_ipv4_with_prot, proto_hdr_tcp}
+ 2, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_tcp}}
};
struct virtchnl_proto_hdrs second_inner_ipv6_tmplt = {
- 2, 1, {proto_hdr_ipv6}
+ 2, 1, {{proto_hdr_ipv6}}
};
struct virtchnl_proto_hdrs second_inner_ipv6_udp_tmplt = {
- 2, 2, {proto_hdr_ipv6_with_prot, proto_hdr_udp}
+ 2, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_udp}}
};
struct virtchnl_proto_hdrs second_inner_ipv6_tcp_tmplt = {
- 2, 2, {proto_hdr_ipv6_with_prot, proto_hdr_tcp}
+ 2, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_tcp}}
};
struct virtchnl_proto_hdrs inner_ipv4_sctp_tmplt = {
- TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4, proto_hdr_sctp}
+ TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4, proto_hdr_sctp}}
};
struct virtchnl_proto_hdrs inner_ipv6_tmplt = {
- TUNNEL_LEVEL_INNER, 1, {proto_hdr_ipv6}
+ TUNNEL_LEVEL_INNER, 1, {{proto_hdr_ipv6}}
};
struct virtchnl_proto_hdrs inner_ipv6_udp_tmplt = {
- TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_udp}
+ TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_udp}}
};
struct virtchnl_proto_hdrs inner_ipv6_tcp_tmplt = {
- TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_tcp}
+ TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_tcp}}
};
struct virtchnl_proto_hdrs inner_ipv6_sctp_tmplt = {
- TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6, proto_hdr_sctp}
+ TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6, proto_hdr_sctp}}
};
struct virtchnl_proto_hdrs ipv4_esp_tmplt = {
- TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_esp}
+ TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_esp}}
};
struct virtchnl_proto_hdrs ipv4_udp_esp_tmplt = {
TUNNEL_LEVEL_OUTER, 3,
- {proto_hdr_ipv4, proto_hdr_udp, proto_hdr_esp}
+ {{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_esp}}
};
struct virtchnl_proto_hdrs ipv4_ah_tmplt = {
- TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_ah}
+ TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_ah}}
};
struct virtchnl_proto_hdrs ipv6_esp_tmplt = {
- TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_esp}
+ TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_esp}}
};
struct virtchnl_proto_hdrs ipv6_udp_esp_tmplt = {
TUNNEL_LEVEL_OUTER, 3,
- {proto_hdr_ipv6, proto_hdr_udp, proto_hdr_esp}
+ {{proto_hdr_ipv6, proto_hdr_udp, proto_hdr_esp}}
};
struct virtchnl_proto_hdrs ipv6_ah_tmplt = {
- TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_ah}
+ TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_ah}}
};
struct virtchnl_proto_hdrs ipv4_l2tpv3_tmplt = {
- TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_l2tpv3}
+ TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_l2tpv3}}
};
struct virtchnl_proto_hdrs ipv6_l2tpv3_tmplt = {
- TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_l2tpv3}
+ TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_l2tpv3}}
};
struct virtchnl_proto_hdrs ipv4_pfcp_tmplt = {
- TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_pfcp}
+ TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_pfcp}}
};
struct virtchnl_proto_hdrs ipv6_pfcp_tmplt = {
- TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_pfcp}
+ TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_pfcp}}
};
struct virtchnl_proto_hdrs ipv4_udp_gtpc_tmplt = {
- TUNNEL_LEVEL_OUTER, 3, {proto_hdr_ipv4, proto_hdr_udp, proto_hdr_gtpc}
+ TUNNEL_LEVEL_OUTER, 3, {{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_gtpc}}
};
struct virtchnl_proto_hdrs ipv6_udp_gtpc_tmplt = {
- TUNNEL_LEVEL_OUTER, 3, {proto_hdr_ipv6, proto_hdr_udp, proto_hdr_gtpc}
+ TUNNEL_LEVEL_OUTER, 3, {{proto_hdr_ipv6, proto_hdr_udp, proto_hdr_gtpc}}
};
struct virtchnl_proto_hdrs eth_ecpri_tmplt = {
- TUNNEL_LEVEL_OUTER, 2, {proto_hdr_eth, proto_hdr_ecpri}
+ TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_eth, proto_hdr_ecpri}}
};
struct virtchnl_proto_hdrs ipv4_ecpri_tmplt = {
- TUNNEL_LEVEL_OUTER, 3, {proto_hdr_ipv4, proto_hdr_udp, proto_hdr_ecpri}
+ TUNNEL_LEVEL_OUTER, 3, {{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_ecpri}}
};
struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv4_tmplt = {
TUNNEL_LEVEL_INNER, 3,
- {proto_hdr_l2tpv2,
+ {{proto_hdr_l2tpv2,
proto_hdr_ppp,
- proto_hdr_ipv4}
+ proto_hdr_ipv4}}
};
struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_tmplt = {
TUNNEL_LEVEL_INNER, 3,
- {proto_hdr_l2tpv2,
+ {{proto_hdr_l2tpv2,
proto_hdr_ppp,
- proto_hdr_ipv6}
+ proto_hdr_ipv6}}
};
struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv4_udp_tmplt = {
TUNNEL_LEVEL_INNER, 4,
- {proto_hdr_l2tpv2,
+ {{proto_hdr_l2tpv2,
proto_hdr_ppp,
proto_hdr_ipv4_with_prot,
- proto_hdr_udp}
+ proto_hdr_udp}}
};
struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv4_tcp_tmplt = {
TUNNEL_LEVEL_INNER, 4,
- {proto_hdr_l2tpv2,
+ {{proto_hdr_l2tpv2,
proto_hdr_ppp,
proto_hdr_ipv4_with_prot,
- proto_hdr_tcp}
+ proto_hdr_tcp}}
};
struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_udp_tmplt = {
TUNNEL_LEVEL_INNER, 4,
- {proto_hdr_l2tpv2,
+ {{proto_hdr_l2tpv2,
proto_hdr_ppp,
proto_hdr_ipv6_with_prot,
- proto_hdr_udp}
+ proto_hdr_udp}}
};
struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_tcp_tmplt = {
TUNNEL_LEVEL_INNER, 4,
- {proto_hdr_l2tpv2,
+ {{proto_hdr_l2tpv2,
proto_hdr_ppp,
proto_hdr_ipv6_with_prot,
- proto_hdr_tcp}
+ proto_hdr_tcp}}
};
/* rss type super set */
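The template churn above is purely mechanical: once proto_hdr[] moves
into an anonymous union, each initializer needs one more brace level
to select the union member. A minimal illustration with simplified
types (not from the patch):

	struct hdr { int type; };
	struct hdrs {
		int tunnel_level;
		int count;
		union {
			struct hdr proto_hdr[4];
			struct { int pkt_len; } raw;
		};
	};

	/* outer braces: the anonymous union (first member is chosen);
	 * inner braces: the proto_hdr array itself. A single level
	 * would rely on brace elision and warn under -Wmissing-braces.
	 */
	static struct hdrs tmplt = { 1, 2, {{ {10}, {20} }} };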
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index cefbccd3e4..e0066e1dac 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -3197,6 +3197,11 @@ static int ice_init_rss(struct ice_pf *pf)
bool is_safe_mode = pf->adapter->is_safe_mode;
uint32_t reg;
+#ifdef RTE_ETHDEV_EXCL_Q
+ uint16_t q_index = 0;
+ uint16_t excl_q = RSS_RETA_EXCL_Q_ID;
+#endif /* RTE_ETHDEV_EXCL_Q */
+
rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
nb_q = dev_data->nb_rx_queues;
vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE +
@@ -3253,6 +3258,27 @@ static int ice_init_rss(struct ice_pf *pf)
for (i = 0; i < vsi->rss_lut_size; i++)
vsi->rss_lut[i] = i % nb_q;
+#ifdef RTE_ETHDEV_EXCL_Q
+ if (nb_q > 1) {
+ if (excl_q > nb_q) {
+ /* if the excl_q index is higher than the max queue, assign the
+ * default RSS LUT
+ */
+ for (i = 0; i < vsi->rss_lut_size; i++)
+ vsi->rss_lut[i] = i % nb_q;
+
+ } else {
+ for (i = 0, q_index = 0; i < vsi->rss_lut_size; i++, q_index++) {
+ /* Increment q_index to skip excl_q */
+ if (q_index % nb_q == excl_q)
+ q_index++;
+
+ vsi->rss_lut[i] = q_index % nb_q;
+ }
+ }
+ }
+#endif /* RTE_ETHDEV_EXCL_Q */
+
lut_params.vsi_handle = vsi->idx;
lut_params.lut_size = vsi->rss_lut_size;
lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 4da4baf2a1..4ae8db3936 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -160,6 +160,9 @@ extern "C" {
#define RTE_ETHDEV_DEBUG_TX
#endif
+#define RTE_ETHDEV_EXCL_Q
+#define RSS_RETA_EXCL_Q_ID 1
+
#include <rte_compat.h>
#include <rte_log.h>
#include <rte_interrupts.h>
--
2.31.1
* Re: [RFC PATCH v3] net/iavf: support rte flow with mask for FDIR
From: Stephen Hemminger @ 2024-10-15 23:08 UTC
To: Ian Stokes; +Cc: dev, Ananth S, Zhichao Zeng
On Mon, 25 Mar 2024 17:01:41 +0000
Ian Stokes <ian.stokes@intel.com> wrote:
> From: Ananth S <ananth.s@intel.com>
>
> This patch adds support for rte_flow rules with masks for FDIR,
> covering eth/ipv4/ipv6/tcp/udp flow items; masked src/dst addresses
> for ipv4/ipv6 and masked src/dst ports for tcp/udp are realized
> through the switch filter.
>
> This patch additionally contains fixes for the issues identified
> in the [21.11.5 v2] patch.
>
> This patch is based on DPDK v21.11.5 LTS
> [4e50ad4469f7c037e32de5aa3535d1cd25de0741], for customer cherry-pick.
If this is meant for stable, then it should be sent to the stable maintainers.
Or is it meant for upstream?