From mboxrd@z Thu Jan  1 00:00:00 1970
From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal, Sachin Saxena
Cc: Jun Yang
Subject: [v5 28/42] net/dpaa2: protocol inside tunnel distribution
Date: Wed, 23 Oct 2024 17:29:41 +0530
Message-Id: <20241023115955.1207617-29-vanshika.shukla@nxp.com>
In-Reply-To: <20241023115955.1207617-1-vanshika.shukla@nxp.com>
References: <20241022191256.516818-1-vanshika.shukla@nxp.com>
 <20241023115955.1207617-1-vanshika.shukla@nxp.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

From: Jun Yang

Distribute flows based on the protocols carried inside a tunnel.

The tunnel flow items applied by the application are ordered from outer
to inner, and the inner items start right after the tunnel item itself
(VXLAN, GRE, etc.). For example:

  flow create 0 ingress pattern ipv4 / vxlan / ipv6 / end
       actions pf / queue index 2 / end

The items following the tunnel item are therefore tagged as "inner".
The inner items are extracted from the parser results, which are set by
the soft parser.

So far only the VXLAN tunnel is supported. Limited by the soft parser
area, only the Ethernet and VLAN headers inside the tunnel can be used
for flow distribution. IPv4, IPv6, UDP and TCP inside the tunnel can be
detected through user-defined frame attribute flags (FAF) set by the
soft parser and used for flow distribution as well.
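For reference, the testpmd command above corresponds roughly to the
following calls through the public rte_flow API. This is a minimal
sketch for illustration only, not part of the patch: the helper name is
hypothetical, port 0 and queue 2 come from the example, and the "pf"
action is omitted for brevity.

  #include <rte_flow.h>

  /* Items placed after the VXLAN item are treated as inner by this
   * driver and matched via soft parser results.
   */
  static struct rte_flow *
  create_inner_dist_flow(uint16_t port_id, struct rte_flow_error *err)
  {
  	struct rte_flow_attr attr = { .ingress = 1 };
  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* outer */
  		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN }, /* tunnel */
  		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },  /* inner */
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action_queue queue = { .index = 2 };
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	return rte_flow_create(port_id, &attr, pattern, actions, err);
  }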
Signed-off-by: Jun Yang
---
 drivers/net/dpaa2/dpaa2_flow.c | 587 +++++++++++++++++++++++++++++----
 1 file changed, 519 insertions(+), 68 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index 3530417a29..d02859fea7 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -58,6 +58,11 @@ struct dpaa2_dev_flow {
 	struct dpni_fs_action_cfg fs_action_cfg;
 };
 
+struct rte_dpaa2_flow_item {
+	struct rte_flow_item generic_item;
+	int in_tunnel;
+};
+
 static const
 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
 	RTE_FLOW_ITEM_TYPE_END,
@@ -1935,10 +1940,203 @@ dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow,
 }
 
 static int
-dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
+dpaa2_configure_flow_tunnel_eth(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
 	const struct rte_flow_item *pattern,
+	int *device_configured)
+{
+	int ret, local_cfg = 0;
+	uint32_t group;
+	const struct rte_flow_item_eth *spec, *mask;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
+
+	group = attr->group;
+
+	/* Parse pattern list to get the matching parameters */
+	spec = pattern->spec;
+	mask = pattern->mask ?
+		pattern->mask : &dpaa2_flow_item_eth_mask;
+
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec)
+		return 0;
+
+	if (dpaa2_flow_extract_support((const uint8_t *)mask,
+		RTE_FLOW_ITEM_TYPE_ETH)) {
+		DPAA2_PMD_WARN("Extract field(s) of ethernet not supported");
+
+		return -EINVAL;
+	}
+
+	if (memcmp((const char *)&mask->src,
+		zero_cmp, RTE_ETHER_ADDR_LEN)) {
+		/*SRC[0:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR0_OFFSET,
+			1, &spec->src.addr_bytes[0],
+			&mask->src.addr_bytes[0],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[1:2]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR1_OFFSET,
+			2, &spec->src.addr_bytes[1],
+			&mask->src.addr_bytes[1],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[3:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR3_OFFSET,
+			1, &spec->src.addr_bytes[3],
+			&mask->src.addr_bytes[3],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[4:2]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR4_OFFSET,
+			2, &spec->src.addr_bytes[4],
+			&mask->src.addr_bytes[4],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+
+		/*SRC[0:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR0_OFFSET,
+			1, &spec->src.addr_bytes[0],
+			&mask->src.addr_bytes[0],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[1:2]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR1_OFFSET,
+			2, &spec->src.addr_bytes[1],
+			&mask->src.addr_bytes[1],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[3:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR3_OFFSET,
+			1, &spec->src.addr_bytes[3],
+			&mask->src.addr_bytes[3],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[4:2]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR4_OFFSET,
+			2, &spec->src.addr_bytes[4],
+			&mask->src.addr_bytes[4],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+	}
+
+	if (memcmp((const char *)&mask->dst,
+		zero_cmp, RTE_ETHER_ADDR_LEN)) {
+		/*DST[0:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR0_OFFSET,
+			1, &spec->dst.addr_bytes[0],
+			&mask->dst.addr_bytes[0],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[1:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR1_OFFSET,
+			1, &spec->dst.addr_bytes[1],
+			&mask->dst.addr_bytes[1],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[2:3]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR2_OFFSET,
+			3, &spec->dst.addr_bytes[2],
+			&mask->dst.addr_bytes[2],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[5:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR5_OFFSET,
+			1, &spec->dst.addr_bytes[5],
+			&mask->dst.addr_bytes[5],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+
+		/*DST[0:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR0_OFFSET,
+			1, &spec->dst.addr_bytes[0],
+			&mask->dst.addr_bytes[0],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[1:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR1_OFFSET,
+			1, &spec->dst.addr_bytes[1],
+			&mask->dst.addr_bytes[1],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[2:3]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR2_OFFSET,
+			3, &spec->dst.addr_bytes[2],
+			&mask->dst.addr_bytes[2],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[5:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR5_OFFSET,
+			1, &spec->dst.addr_bytes[5],
+			&mask->dst.addr_bytes[5],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+	}
+
+	if (memcmp((const char *)&mask->type,
+		zero_cmp, sizeof(rte_be16_t))) {
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_TYPE_OFFSET,
+			sizeof(rte_be16_t), &spec->type, &mask->type,
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_TYPE_OFFSET,
+			sizeof(rte_be16_t), &spec->type, &mask->type,
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+	}
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
+}
+
+static int
+dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
+	struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -1948,6 +2146,13 @@ dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
 	const struct rte_flow_item_eth *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
+	const struct rte_flow_item *pattern =
+		&dpaa2_pattern->generic_item;
+
+	if (dpaa2_pattern->in_tunnel) {
+		return dpaa2_configure_flow_tunnel_eth(flow,
+			dev, attr, pattern, device_configured);
+	}
 
 	group = attr->group;
 
@@ -2041,10 +2246,81 @@ dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
 }
 
 static int
-dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
+dpaa2_configure_flow_tunnel_vlan(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
 	const struct rte_flow_item *pattern,
+	int *device_configured)
+{
+	int ret, local_cfg = 0;
+	uint32_t group;
+	const struct rte_flow_item_vlan *spec, *mask;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+	group = attr->group;
+
+	/* Parse pattern list to get the matching parameters */
+	spec = pattern->spec;
+	mask = pattern->mask ?
+		pattern->mask : &dpaa2_flow_item_vlan_mask;
+
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec) {
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAFE_VXLAN_IN_VLAN_FRAM,
+			DPAA2_FLOW_QOS_TYPE,
+			group, &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAFE_VXLAN_IN_VLAN_FRAM,
+			DPAA2_FLOW_FS_TYPE,
+			group, &local_cfg);
+		if (ret)
+			return ret;
+
+		(*device_configured) |= local_cfg;
+		return 0;
+	}
+
+	if (dpaa2_flow_extract_support((const uint8_t *)mask,
+		RTE_FLOW_ITEM_TYPE_VLAN)) {
+		DPAA2_PMD_WARN("Extract field(s) of vlan not supported");
+
+		return -EINVAL;
+	}
+
+	if (!mask->tci)
+		return 0;
+
+	ret = dpaa2_flow_add_pr_extract_rule(flow,
+		DPAA2_VXLAN_IN_TCI_OFFSET,
+		sizeof(rte_be16_t), &spec->tci, &mask->tci,
+		priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+	if (ret)
+		return ret;
+
+	ret = dpaa2_flow_add_pr_extract_rule(flow,
+		DPAA2_VXLAN_IN_TCI_OFFSET,
+		sizeof(rte_be16_t), &spec->tci, &mask->tci,
+		priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+	if (ret)
+		return ret;
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
+}
+
+static int
+dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
+	struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2053,6 +2329,13 @@ dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_vlan *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern =
+		&dpaa2_pattern->generic_item;
+
+	if (dpaa2_pattern->in_tunnel) {
+		return dpaa2_configure_flow_tunnel_vlan(flow,
+			dev, attr, pattern, device_configured);
+	}
 
 	group = attr->group;
 
@@ -2112,7 +2395,7 @@
 static int
 dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2123,6 +2406,7 @@ dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	const void *key, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	int size;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2131,6 +2415,26 @@ dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	mask_ipv4 = pattern->mask ?
 		pattern->mask : &dpaa2_flow_item_ipv4_mask;
 
+	if (dpaa2_pattern->in_tunnel) {
+		if (spec_ipv4) {
+			DPAA2_PMD_ERR("Tunnel-IPv4 distribution not supported");
+			return -ENOTSUP;
+		}
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_IPV4_FRAM,
+				DPAA2_FLOW_QOS_TYPE, group,
+				&local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_IPV4_FRAM,
+				DPAA2_FLOW_FS_TYPE, group,
+				&local_cfg);
+		return ret;
+	}
+
 	/* Get traffic class index and flow id to be configured */
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
@@ -2229,7 +2533,7 @@ dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 static int
 dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2241,6 +2545,7 @@ dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
 	int size;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2252,6 +2557,26 @@ dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		if (spec_ipv6) {
+			DPAA2_PMD_ERR("Tunnel-IPv6 distribution not supported");
+			return -ENOTSUP;
+		}
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_IPV6_FRAM,
+				DPAA2_FLOW_QOS_TYPE, group,
+				&local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_IPV6_FRAM,
+				DPAA2_FLOW_FS_TYPE, group,
+				&local_cfg);
+		return ret;
+	}
+
 	ret = dpaa2_flow_identify_by_faf(priv, flow,
 			FAF_IPV6_FRAM, DPAA2_FLOW_QOS_TYPE,
 			group, &local_cfg);
@@ -2348,7 +2673,7 @@ static int
 dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2357,6 +2682,7 @@ dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_icmp *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2369,6 +2695,11 @@ dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-ICMP distribution not supported");
+		return -ENOTSUP;
+	}
+
 	if (!spec) {
 		ret = dpaa2_flow_identify_by_faf(priv, flow,
 				FAF_ICMP_FRAM, DPAA2_FLOW_QOS_TYPE,
@@ -2434,7 +2765,7 @@ static int
 dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2443,6 +2774,7 @@ dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_udp *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2455,6 +2787,26 @@ dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		if (spec) {
+			DPAA2_PMD_ERR("Tunnel-UDP distribution not supported");
+			return -ENOTSUP;
+		}
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_UDP_FRAM,
+				DPAA2_FLOW_QOS_TYPE, group,
+				&local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_UDP_FRAM,
+				DPAA2_FLOW_FS_TYPE, group,
+				&local_cfg);
+		return ret;
+	}
+
 	ret = dpaa2_flow_identify_by_faf(priv, flow,
 			FAF_UDP_FRAM, DPAA2_FLOW_QOS_TYPE,
 			group, &local_cfg);
@@ -2520,7 +2872,7 @@ static int
 dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2529,6 +2881,7 @@ dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_tcp *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2541,6 +2894,26 @@ dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		if (spec) {
+			DPAA2_PMD_ERR("Tunnel-TCP distribution not supported");
+			return -ENOTSUP;
+		}
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_TCP_FRAM,
+				DPAA2_FLOW_QOS_TYPE, group,
+				&local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_TCP_FRAM,
+				DPAA2_FLOW_FS_TYPE, group,
+				&local_cfg);
+		return ret;
+	}
+
 	ret = dpaa2_flow_identify_by_faf(priv, flow,
 			FAF_TCP_FRAM, DPAA2_FLOW_QOS_TYPE,
 			group, &local_cfg);
@@ -2606,7 +2979,7 @@ static int
 dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2615,6 +2988,7 @@ dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_sctp *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2627,6 +3001,11 @@ dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-SCTP distribution not supported");
+		return -ENOTSUP;
+	}
+
 	ret = dpaa2_flow_identify_by_faf(priv, flow,
 			FAF_SCTP_FRAM, DPAA2_FLOW_QOS_TYPE,
 			group, &local_cfg);
@@ -2692,7 +3071,7 @@ static int
 dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2701,6 +3080,7 @@ dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_gre *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2713,6 +3093,11 @@ dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-GRE distribution not supported");
+		return -ENOTSUP;
+	}
+
 	if (!spec) {
 		ret = dpaa2_flow_identify_by_faf(priv, flow,
 				FAF_GRE_FRAM, DPAA2_FLOW_QOS_TYPE,
@@ -2763,7 +3148,7 @@ static int
 dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2772,6 +3157,7 @@ dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_vxlan *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2784,6 +3170,11 @@ dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-VXLAN distribution not supported");
+		return -ENOTSUP;
+	}
+
 	if (!spec) {
 		ret = dpaa2_flow_identify_by_faf(priv, flow,
 				FAF_VXLAN_FRAM, DPAA2_FLOW_QOS_TYPE,
@@ -2847,18 +3238,19 @@ static int
 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	const struct rte_flow_item_raw *spec = pattern->spec;
-	const struct rte_flow_item_raw *mask = pattern->mask;
 	int local_cfg = 0, ret;
 	uint32_t group;
 	struct dpaa2_key_extract *qos_key_extract;
 	struct dpaa2_key_extract *tc_key_extract;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
+	const struct rte_flow_item_raw *spec = pattern->spec;
+	const struct rte_flow_item_raw *mask = pattern->mask;
 
 	/* Need both spec and mask */
 	if (!spec || !mask) {
@@ -3302,6 +3694,45 @@ dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv,
 	return 0;
 }
 
+static int
+dpaa2_flow_item_convert(const struct rte_flow_item pattern[],
+	struct rte_dpaa2_flow_item **dpaa2_pattern)
+{
+	struct rte_dpaa2_flow_item *new_pattern;
+	int num = 0, tunnel_start = 0;
+
+	while (1) {
+		num++;
+		if (pattern[num].type == RTE_FLOW_ITEM_TYPE_END)
+			break;
+	}
+
+	new_pattern = rte_malloc(NULL,
+		sizeof(struct rte_dpaa2_flow_item) * (num + 1),
+		RTE_CACHE_LINE_SIZE);
+	if (!new_pattern) {
+		DPAA2_PMD_ERR("Failed to alloc %d flow items", num);
+		return -ENOMEM;
+	}
+
+	num = 0;
+	while (pattern[num].type != RTE_FLOW_ITEM_TYPE_END) {
+		memcpy(&new_pattern[num].generic_item, &pattern[num],
+			sizeof(struct rte_flow_item));
+		new_pattern[num].in_tunnel = 0;
+
+		if (pattern[num].type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tunnel_start = 1;
+		else if (tunnel_start)
+			new_pattern[num].in_tunnel = 1;
+		num++;
+	}
+
+	new_pattern[num].generic_item.type = RTE_FLOW_ITEM_TYPE_END;
+	*dpaa2_pattern = new_pattern;
+
+	return 0;
+}
+
 static int
 dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
@@ -3318,6 +3749,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 	uint16_t dist_size, key_size;
 	struct dpaa2_key_extract *qos_key_extract;
 	struct dpaa2_key_extract *tc_key_extract;
+	struct rte_dpaa2_flow_item *dpaa2_pattern = NULL;
 
 	ret = dpaa2_flow_verify_attr(priv, attr);
 	if (ret)
@@ -3327,107 +3759,121 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 	if (ret)
 		return ret;
 
+	ret = dpaa2_flow_item_convert(pattern, &dpaa2_pattern);
+	if (ret)
+		return ret;
+
 	/* Parse pattern list to get the matching parameters */
 	while (!end_of_list) {
 		switch (pattern[i].type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			ret = dpaa2_configure_flow_eth(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_eth(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("ETH flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			ret = dpaa2_configure_flow_vlan(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_vlan(flow, dev, attr,
					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("vLan flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
-			ret = dpaa2_configure_flow_ipv4(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_ipv4(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("IPV4 flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
-			ret = dpaa2_configure_flow_ipv6(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_ipv6(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("IPV6 flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_ICMP:
-			ret = dpaa2_configure_flow_icmp(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_icmp(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("ICMP flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
-			ret = dpaa2_configure_flow_udp(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_udp(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("UDP flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
-			ret = dpaa2_configure_flow_tcp(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_tcp(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("TCP flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
-			ret = dpaa2_configure_flow_sctp(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_sctp(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("SCTP flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
-			ret = dpaa2_configure_flow_gre(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_gre(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("GRE flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
-			ret = dpaa2_configure_flow_vxlan(flow,
-				dev, attr, &pattern[i], actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_vxlan(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("VXLAN flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_RAW:
-			ret = dpaa2_configure_flow_raw(flow,
-				dev, attr, &pattern[i],
-				actions, error,
-				&is_keycfg_configured);
+			ret = dpaa2_configure_flow_raw(flow, dev, attr,
+					&dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("RAW flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_END:
@@ -3459,7 +3905,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 			ret = dpaa2_configure_flow_fs_action(priv, flow,
 					&actions[j]);
 			if (ret)
-				return ret;
+				goto end_flow_set;
 
 			/* Configure FS table first*/
 			dist_size = priv->nb_rx_queues / priv->num_rx_tc;
@@ -3469,20 +3915,20 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 					dist_size, false);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			/* Configure QoS table then.*/
 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
 				ret = dpaa2_configure_qos_table(priv, false);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			if (priv->num_rx_tc > 1) {
 				ret = dpaa2_flow_add_qos_rule(priv, flow);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			if (flow->tc_index >= priv->fs_entries) {
@@ -3493,7 +3939,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 
 			ret = dpaa2_flow_add_fs_rule(priv, flow);
 			if (ret)
-				return ret;
+				goto end_flow_set;
 
 			break;
 		case RTE_FLOW_ACTION_TYPE_RSS:
@@ -3505,7 +3951,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 			if (ret < 0) {
 				DPAA2_PMD_ERR("TC[%d] distset RSS failed",
 					flow->tc_id);
-				return ret;
+				goto end_flow_set;
 			}
 
 			dist_size = rss_conf->queue_num;
@@ -3515,22 +3961,22 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 					dist_size, true);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
 				ret = dpaa2_configure_qos_table(priv, true);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			ret = dpaa2_flow_add_qos_rule(priv, flow);
 			if (ret)
-				return ret;
+				goto end_flow_set;
 
 			ret = dpaa2_flow_add_fs_rule(priv, flow);
 			if (ret)
-				return ret;
+				goto end_flow_set;
 
 			break;
 		case RTE_FLOW_ACTION_TYPE_PF:
@@ -3547,6 +3993,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 		j++;
 	}
 
+end_flow_set:
 	if (!ret) {
 		/* New rules are inserted. */
 		if (!curr) {
@@ -3557,6 +4004,10 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 			LIST_INSERT_AFTER(curr, flow, next);
 		}
 	}
+
+	if (dpaa2_pattern)
+		rte_free(dpaa2_pattern);
+
 	return ret;
 }
-- 
2.25.1