From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal, Sachin Saxena
Cc: Jun Yang
Subject: [v4 27/42] net/dpaa2: add VXLAN distribution support
Date: Wed, 23 Oct 2024 00:42:40 +0530
Message-Id: <20241022191256.516818-28-vanshika.shukla@nxp.com>
In-Reply-To: <20241022191256.516818-1-vanshika.shukla@nxp.com>
References: <20241014120126.170790-1-vanshika.shukla@nxp.com>
 <20241022191256.516818-1-vanshika.shukla@nxp.com>

From: Jun Yang

Extract fields from the VXLAN header for distribution. The VXLAN header
is stored by the soft parser code in the soft parser context, which
starts at offset 43 of the parser results; a VXLAN frame is identified
by the VXLAN bit of the frame attribute flags (FAF). Parser-result
extracts are added to implement this functionality.
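In addition to the plain VXLAN match shown in the example below, the
parser-result extract allows the VNI field to be used as a distribution
key. An illustrative rule, assuming testpmd's standard vxlan item
syntax (the VNI value 16 is arbitrary):

flow create 0 ingress pattern vxlan vni is 16 / end actions pf / queue index 4 / end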
Example:
flow create 0 ingress pattern vxlan / end actions pf / queue index 4 / end

Signed-off-by: Jun Yang
---
 drivers/net/dpaa2/dpaa2_ethdev.h |   6 +-
 drivers/net/dpaa2/dpaa2_flow.c   | 313 +++++++++++++++++++++++++++++++
 2 files changed, 318 insertions(+), 1 deletion(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 8f548467a4..aeddcfdfa9 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -282,8 +282,12 @@ enum ip_addr_extract_type {
 };
 
 enum key_prot_type {
+	/* HW extracts from standard protocol fields */
 	DPAA2_NET_PROT_KEY,
-	DPAA2_FAF_KEY
+	/* HW extracts from FAF of PR */
+	DPAA2_FAF_KEY,
+	/* HW extracts from PR other than FAF */
+	DPAA2_PR_KEY
 };
 
 struct key_prot_field {
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index 4c80efeff7..3530417a29 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -38,6 +38,8 @@ enum dpaa2_flow_dist_type {
 #define DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT	16
 #define DPAA2_FLOW_MAX_KEY_SIZE		16
 
+#define VXLAN_HF_VNI 0x08
+
 struct dpaa2_dev_flow {
 	LIST_ENTRY(dpaa2_dev_flow) next;
 	struct dpni_rule_cfg qos_rule;
@@ -140,6 +142,11 @@ static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
 	.protocol = RTE_BE16(0xffff),
 };
+
+static const struct rte_flow_item_vxlan dpaa2_flow_item_vxlan_mask = {
+	.flags = 0xff,
+	.vni = "\xff\xff\xff",
+};
 #endif
 
 #define DPAA2_FLOW_DUMP printf
@@ -688,6 +695,68 @@ dpaa2_flow_faf_advance(struct dpaa2_dev_priv *priv,
 	return pos;
 }
 
+static int
+dpaa2_flow_pr_advance(struct dpaa2_dev_priv *priv,
+	uint32_t pr_offset, uint32_t pr_size,
+	enum dpaa2_flow_dist_type dist_type, int tc_id,
+	int *insert_offset)
+{
+	int offset, ret;
+	struct dpaa2_key_profile *key_profile;
+	int num, pos;
+
+	if (dist_type == DPAA2_FLOW_QOS_TYPE)
+		key_profile = &priv->extract.qos_key_extract.key_profile;
+	else
+		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
+
+	num = key_profile->num;
+
+	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
+		DPAA2_PMD_ERR("Number of extracts overflows");
+		return -EINVAL;
+	}
+
+	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
+		offset = key_profile->ip_addr_extract_off;
+		pos = key_profile->ip_addr_extract_pos;
+		key_profile->ip_addr_extract_pos++;
+		key_profile->ip_addr_extract_off += pr_size;
+		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
+			ret = dpaa2_flow_qos_rule_insert_hole(priv,
+					offset, pr_size);
+		} else {
+			ret = dpaa2_flow_fs_rule_insert_hole(priv,
+					offset, pr_size, tc_id);
+		}
+		if (ret)
+			return ret;
+	} else {
+		pos = num;
+	}
+
+	if (pos > 0) {
+		key_profile->key_offset[pos] =
+			key_profile->key_offset[pos - 1] +
+			key_profile->key_size[pos - 1];
+	} else {
+		key_profile->key_offset[pos] = 0;
+	}
+
+	key_profile->key_size[pos] = pr_size;
+	key_profile->prot_field[pos].type = DPAA2_PR_KEY;
+	key_profile->prot_field[pos].key_field =
+		(pr_offset << 16) | pr_size;
+	key_profile->num++;
+
+	if (insert_offset)
+		*insert_offset = key_profile->key_offset[pos];
+
+	key_profile->key_max_size += pr_size;
+
+	return pos;
+}
+
 /* Move IPv4/IPv6 addresses to fill new extract previous IP address.
  * Current MC/WRIOP only support generic IP extract but IP address
  * is not fixed, so we have to put them at end of extracts, otherwise,
@@ -822,6 +891,59 @@ dpaa2_flow_faf_add_hdr(int faf_byte,
 	return 0;
 }
 
+static int
+dpaa2_flow_pr_add_hdr(uint32_t pr_offset,
+	uint32_t pr_size, struct dpaa2_dev_priv *priv,
+	enum dpaa2_flow_dist_type dist_type, int tc_id,
+	int *insert_offset)
+{
+	int pos, i;
+	struct dpaa2_key_extract *key_extract;
+	struct dpkg_profile_cfg *dpkg;
+	struct dpkg_extract *extracts;
+
+	if ((pr_offset + pr_size) > DPAA2_FAPR_SIZE) {
+		DPAA2_PMD_ERR("PR extracts(%d:%d) overflow",
+			pr_offset, pr_size);
+		return -EINVAL;
+	}
+
+	if (dist_type == DPAA2_FLOW_QOS_TYPE)
+		key_extract = &priv->extract.qos_key_extract;
+	else
+		key_extract = &priv->extract.tc_key_extract[tc_id];
+
+	dpkg = &key_extract->dpkg;
+	extracts = dpkg->extracts;
+
+	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+		DPAA2_PMD_ERR("Number of extracts overflows");
+		return -EINVAL;
+	}
+
+	pos = dpaa2_flow_pr_advance(priv,
+			pr_offset, pr_size, dist_type, tc_id,
+			insert_offset);
+	if (pos < 0)
+		return pos;
+
+	if (pos != dpkg->num_extracts) {
+		/* Not the last pos, must have IP address extract. */
+		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
+			memcpy(&extracts[i + 1],
+				&extracts[i], sizeof(struct dpkg_extract));
+		}
+	}
+
+	extracts[pos].type = DPKG_EXTRACT_FROM_PARSE;
+	extracts[pos].extract.from_parse.offset = pr_offset;
+	extracts[pos].extract.from_parse.size = pr_size;
+
+	dpkg->num_extracts++;
+
+	return 0;
+}
+
 static int
 dpaa2_flow_extract_add_hdr(enum net_prot prot,
 	uint32_t field, uint8_t field_size,
@@ -1170,6 +1292,10 @@ dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile,
 			prot_field[pos].key_field == key_field &&
 			prot_field[pos].type == type)
 			return pos;
+		else if (type == DPAA2_PR_KEY &&
+			prot_field[pos].key_field == key_field &&
+			prot_field[pos].type == type)
+			return pos;
 	}
 
 	if (type == DPAA2_NET_PROT_KEY &&
@@ -1261,6 +1387,41 @@ dpaa2_flow_faf_add_rule(struct dpaa2_dev_priv *priv,
 	return 0;
 }
 
+static inline int
+dpaa2_flow_pr_rule_data_set(struct dpaa2_dev_flow *flow,
+	struct dpaa2_key_profile *key_profile,
+	uint32_t pr_offset, uint32_t pr_size,
+	const void *key, const void *mask,
+	enum dpaa2_flow_dist_type dist_type)
+{
+	int offset;
+	uint32_t pr_field = pr_offset << 16 | pr_size;
+
+	offset = dpaa2_flow_extract_key_offset(key_profile,
+			DPAA2_PR_KEY, NET_PROT_NONE, pr_field);
+	if (offset < 0) {
+		DPAA2_PMD_ERR("PR off(%d)/size(%d) does not exist!",
+			pr_offset, pr_size);
+		return -EINVAL;
+	}
+
+	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
+		memcpy((flow->qos_key_addr + offset), key, pr_size);
+		memcpy((flow->qos_mask_addr + offset), mask, pr_size);
+		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
+			flow->qos_rule_size = offset + pr_size;
+	}
+
+	if (dist_type & DPAA2_FLOW_FS_TYPE) {
+		memcpy((flow->fs_key_addr + offset), key, pr_size);
+		memcpy((flow->fs_mask_addr + offset), mask, pr_size);
+		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
+			flow->fs_rule_size = offset + pr_size;
+	}
+
+	return 0;
+}
+
 static inline int
 dpaa2_flow_hdr_rule_data_set(struct dpaa2_dev_flow *flow,
 	struct dpaa2_key_profile *key_profile,
@@ -1382,6 +1543,10 @@ dpaa2_flow_extract_support(const uint8_t *mask_src,
 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
 		size = sizeof(struct rte_flow_item_gre);
 		break;
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		mask_support = (const char *)&dpaa2_flow_item_vxlan_mask;
+		size = sizeof(struct rte_flow_item_vxlan);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -1469,6 +1634,55 @@ dpaa2_flow_identify_by_faf(struct dpaa2_dev_priv *priv,
 	return 0;
 }
 
+static int
+dpaa2_flow_add_pr_extract_rule(struct dpaa2_dev_flow *flow,
+	uint32_t pr_offset, uint32_t pr_size,
+	const void *key, const void *mask,
+	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
+	enum dpaa2_flow_dist_type dist_type)
+{
+	int index, ret, local_cfg = 0;
+	struct dpaa2_key_extract *key_extract;
+	struct dpaa2_key_profile *key_profile;
+	uint32_t pr_field = pr_offset << 16 | pr_size;
+
+	if (dist_type == DPAA2_FLOW_QOS_TYPE)
+		key_extract = &priv->extract.qos_key_extract;
+	else
+		key_extract = &priv->extract.tc_key_extract[tc_id];
+
+	key_profile = &key_extract->key_profile;
+
+	index = dpaa2_flow_extract_search(key_profile,
+			DPAA2_PR_KEY, NET_PROT_NONE, pr_field);
+	if (index < 0) {
+		ret = dpaa2_flow_pr_add_hdr(pr_offset,
+				pr_size, priv,
+				dist_type, tc_id, NULL);
+		if (ret) {
+			DPAA2_PMD_ERR("PR add off(%d)/size(%d) failed",
+				pr_offset, pr_size);
+
+			return ret;
+		}
+		local_cfg |= dist_type;
+	}
+
+	ret = dpaa2_flow_pr_rule_data_set(flow, key_profile,
+			pr_offset, pr_size, key, mask, dist_type);
+	if (ret) {
+		DPAA2_PMD_ERR("PR off(%d)/size(%d) rule data set failed",
+			pr_offset, pr_size);
+
+		return ret;
+	}
+
+	if (recfg)
+		*recfg |= local_cfg;
+
+	return 0;
+}
+
 static int
 dpaa2_flow_add_hdr_extract_rule(struct dpaa2_dev_flow *flow,
 	enum net_prot prot, uint32_t field,
@@ -2545,6 +2759,90 @@ dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
 	return 0;
 }
 
+static int
+dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
+	struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_flow_item *pattern,
+	const struct rte_flow_action actions[] __rte_unused,
+	struct rte_flow_error *error __rte_unused,
+	int *device_configured)
+{
+	int ret, local_cfg = 0;
+	uint32_t group;
+	const struct rte_flow_item_vxlan *spec, *mask;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+	group = attr->group;
+
+	/* Parse pattern list to get the matching parameters */
+	spec = pattern->spec;
+	mask = pattern->mask ?
+		pattern->mask : &dpaa2_flow_item_vxlan_mask;
+
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec) {
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAF_VXLAN_FRAM, DPAA2_FLOW_QOS_TYPE,
+				group, &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAF_VXLAN_FRAM, DPAA2_FLOW_FS_TYPE,
+				group, &local_cfg);
+		if (ret)
+			return ret;
+
+		(*device_configured) |= local_cfg;
+		return 0;
+	}
+
+	if (dpaa2_flow_extract_support((const uint8_t *)mask,
+		RTE_FLOW_ITEM_TYPE_VXLAN)) {
+		DPAA2_PMD_WARN("Extract field(s) of VXLAN not supported.");
+
+		return -1;
+	}
+
+	if (mask->flags) {
+		if (spec->flags != VXLAN_HF_VNI) {
+			DPAA2_PMD_ERR("vxlan flag(0x%02x) must be 0x%02x.",
+				spec->flags, VXLAN_HF_VNI);
+			return -EINVAL;
+		}
+		if (mask->flags != 0xff) {
+			DPAA2_PMD_ERR("Extraction of vxlan flags is not supported.");
+			return -EINVAL;
+		}
+	}
+
+	if (mask->vni[0] || mask->vni[1] || mask->vni[2]) {
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+				DPAA2_VXLAN_VNI_OFFSET,
+				sizeof(mask->vni), spec->vni,
+				mask->vni,
+				priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+				DPAA2_VXLAN_VNI_OFFSET,
+				sizeof(mask->vni), spec->vni,
+				mask->vni,
+				priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+	}
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
+}
+
 static int
 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
@@ -2760,6 +3058,9 @@ dpaa2_flow_verify_action(struct dpaa2_dev_priv *priv,
 			}
 		}
 
+			break;
+		case RTE_FLOW_ACTION_TYPE_PF:
+			/* Skip this action; it has to be accepted for vxlan */
 			break;
 		case RTE_FLOW_ACTION_TYPE_END:
 			end_of_list = 1;
@@ -3110,6 +3411,15 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 				return ret;
 			}
 			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			ret = dpaa2_configure_flow_vxlan(flow,
+					dev, attr, &pattern[i], actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("VXLAN flow config failed!");
+				return ret;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_RAW:
 			ret = dpaa2_configure_flow_raw(flow,
 					dev, attr, &pattern[i],
@@ -3222,6 +3532,9 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 			if (ret)
 				return ret;
 
+			break;
+		case RTE_FLOW_ACTION_TYPE_PF:
+			/* Skip this action; it has to be accepted for vxlan */
 			break;
 		case RTE_FLOW_ACTION_TYPE_END:
 			end_of_list = 1;
-- 
2.25.1