From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga07.intel.com (mga07.intel.com [134.134.136.100]) by dpdk.org (Postfix) with ESMTP id 516BB1B45C for ; Tue, 16 Apr 2019 05:17:08 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by orsmga105.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 15 Apr 2019 20:17:07 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.60,355,1549958400"; d="scan'208";a="161907523" Received: from dpdkx8602.sh.intel.com ([10.67.110.200]) by fmsmga002.fm.intel.com with ESMTP; 15 Apr 2019 20:17:04 -0700 From: Rosen Xu To: dev@dpdk.org Cc: ferruh.yigit@intel.com, tianfei.zhang@intel.com, dan.wei@intel.com, rosen.xu@intel.com, andy.pei@intel.com, qiming.yang@intel.com, haiyue.wang@intel.com, santos.chen@intel.com, zhang.zhang@intel.com, david.lomartire@intel.com, jia.hu@intel.com Date: Tue, 16 Apr 2019 11:17:43 +0800 Message-Id: <1555384671-42092-7-git-send-email-rosen.xu@intel.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1555384671-42092-1-git-send-email-rosen.xu@intel.com> References: <1551338000-120348-1-git-send-email-rosen.xu@intel.com> <1555384671-42092-1-git-send-email-rosen.xu@intel.com> MIME-Version: 1.0 Content-Type: text/plain; charset=y Content-Transfer-Encoding: 8bit Subject: [dpdk-dev] [PATCH v9 06/14] net/ipn3ke: add IPN3KE Flow of PMD driver X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 16 Apr 2019 03:17:09 -0000 Add Intel FPGA Acceleration NIC IPN3KE Flow of PMD driver. Signed-off-by: Rosen Xu Signed-off-by: Andy Pei Signed-off-by: Dan Wei --- drivers/net/ipn3ke/Makefile | 1 + drivers/net/ipn3ke/ipn3ke_ethdev.c | 5 + drivers/net/ipn3ke/ipn3ke_ethdev.h | 1 + drivers/net/ipn3ke/ipn3ke_flow.c | 1374 +++++++++++++++++++++++++++++++ drivers/net/ipn3ke/ipn3ke_flow.h | 106 +++ drivers/net/ipn3ke/ipn3ke_representor.c | 3 +- drivers/net/ipn3ke/ipn3ke_tm.c | 1 + drivers/net/ipn3ke/meson.build | 3 +- 8 files changed, 1492 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ipn3ke/ipn3ke_flow.c create mode 100644 drivers/net/ipn3ke/ipn3ke_flow.h diff --git a/drivers/net/ipn3ke/Makefile b/drivers/net/ipn3ke/Makefile index 38d9384..8c3ae37 100644 --- a/drivers/net/ipn3ke/Makefile +++ b/drivers/net/ipn3ke/Makefile @@ -35,5 +35,6 @@ LIBABIVER := 1 SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_representor.c SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_flow.c include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/ipn3ke/ipn3ke_ethdev.c b/drivers/net/ipn3ke/ipn3ke_ethdev.c index 508ea01..9079b57 100644 --- a/drivers/net/ipn3ke/ipn3ke_ethdev.c +++ b/drivers/net/ipn3ke/ipn3ke_ethdev.c @@ -21,6 +21,7 @@ #include #include "ipn3ke_rawdev_api.h" +#include "ipn3ke_flow.h" #include "ipn3ke_logs.h" #include "ipn3ke_ethdev.h" @@ -266,6 +267,10 @@ if (ret) return ret; hw->tm_hw_enable = 1; + + ret = ipn3ke_flow_init(hw); + if (ret) + return ret; hw->flow_hw_enable = 1; } diff --git a/drivers/net/ipn3ke/ipn3ke_ethdev.h b/drivers/net/ipn3ke/ipn3ke_ethdev.h index 36ff2f8..bfda9d5 100644 --- a/drivers/net/ipn3ke/ipn3ke_ethdev.h +++ b/drivers/net/ipn3ke/ipn3ke_ethdev.h @@ -291,6 +291,7 @@ struct ipn3ke_hw { uint32_t acc_tm; uint32_t acc_flow; + struct ipn3ke_flow_list flow_list; uint32_t 
flow_max_entries; uint32_t flow_num_entries; diff --git a/drivers/net/ipn3ke/ipn3ke_flow.c b/drivers/net/ipn3ke/ipn3ke_flow.c new file mode 100644 index 0000000..e5937df --- /dev/null +++ b/drivers/net/ipn3ke/ipn3ke_flow.c @@ -0,0 +1,1374 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipn3ke_rawdev_api.h" +#include "ipn3ke_flow.h" +#include "ipn3ke_logs.h" +#include "ipn3ke_ethdev.h" + +/** Static initializer for items. */ +#define FLOW_PATTERNS(...) \ + ((const enum rte_flow_item_type []) { \ + __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \ + }) + +enum IPN3KE_HASH_KEY_TYPE { + IPN3KE_HASH_KEY_VXLAN, + IPN3KE_HASH_KEY_MAC, + IPN3KE_HASH_KEY_QINQ, + IPN3KE_HASH_KEY_MPLS, + IPN3KE_HASH_KEY_IP_TCP, + IPN3KE_HASH_KEY_IP_UDP, + IPN3KE_HASH_KEY_IP_NVGRE, + IPN3KE_HASH_KEY_VXLAN_IP_UDP, +}; + +struct ipn3ke_flow_parse { + uint32_t mark:1; /**< Set if the flow is marked. */ + uint32_t drop:1; /**< ACL drop. */ + uint32_t key_type:IPN3KE_FLOW_KEY_ID_BITS; + uint32_t mark_id:IPN3KE_FLOW_RESULT_UID_BITS; /**< Mark identifier. */ + uint8_t key_len; /**< Length in bit. */ + uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_DATA_BITS)]; + /**< key1, key2 */ +}; + +typedef int (*pattern_filter_t)(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser); + + +struct ipn3ke_flow_pattern { + const enum rte_flow_item_type *const items; + + pattern_filter_t filter; +}; + +/* + * @ RTL definition: + * typedef struct packed { + * logic [47:0] vxlan_inner_mac; + * logic [23:0] vxlan_vni; + * } Hash_Key_Vxlan_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_VXLAN + * RTE_FLOW_ITEM_TYPE_ETH + */ +static int +ipn3ke_pattern_vxlan(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_vxlan *vxlan = NULL; + const struct rte_flow_item_eth *eth = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (/*!item->spec || item->mask || */item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth = item->spec; + + rte_memcpy(&parser->key[0], + eth->src.addr_bytes, + ETHER_ADDR_LEN); + break; + + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan = item->spec; + + rte_memcpy(&parser->key[6], vxlan->vni, 3); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (vxlan != NULL && eth != NULL) { + parser->key_len = 48 + 24; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [47:0] eth_smac; + * } Hash_Key_Mac_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_ETH + */ +static int +ipn3ke_pattern_mac(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_eth *eth = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + 
RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth = item->spec; + + rte_memcpy(parser->key, + eth->src.addr_bytes, + ETHER_ADDR_LEN); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (eth != NULL) { + parser->key_len = 48; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [11:0] outer_vlan_id; + * logic [11:0] inner_vlan_id; + * } Hash_Key_QinQ_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_VLAN + * RTE_FLOW_ITEM_TYPE_VLAN + */ +static int +ipn3ke_pattern_qinq(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_vlan *outer_vlan = NULL; + const struct rte_flow_item_vlan *inner_vlan = NULL; + const struct rte_flow_item *item; + uint16_t tci; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VLAN: + if (!outer_vlan) { + outer_vlan = item->spec; + + tci = rte_be_to_cpu_16(outer_vlan->tci); + parser->key[0] = (tci & 0xff0) >> 4; + parser->key[1] |= (tci & 0x00f) << 4; + } else { + inner_vlan = item->spec; + + tci = rte_be_to_cpu_16(inner_vlan->tci); + parser->key[1] |= (tci & 0xf00) >> 8; + parser->key[2] = (tci & 0x0ff); + } + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (outer_vlan != NULL && inner_vlan != NULL) { + parser->key_len = 12 + 12; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [19:0] mpls_label1; + * logic [19:0] mpls_label2; + * } Hash_Key_Mpls_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_MPLS + * RTE_FLOW_ITEM_TYPE_MPLS + */ +static int +ipn3ke_pattern_mpls(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_mpls *mpls1 = NULL; + const struct rte_flow_item_mpls *mpls2 = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_MPLS: + if (!mpls1) { + mpls1 = item->spec; + + parser->key[0] = mpls1->label_tc_s[0]; + parser->key[1] = mpls1->label_tc_s[1]; + parser->key[2] = mpls1->label_tc_s[2] & 0xf0; + } else { + mpls2 = item->spec; + + parser->key[2] |= + ((mpls2->label_tc_s[0] & 0xf0) >> 4); + parser->key[3] = + ((mpls2->label_tc_s[0] & 0xf) << 4) | + ((mpls2->label_tc_s[1] & 0xf0) >> 4); + parser->key[4] = + ((mpls2->label_tc_s[1] & 0xf) << 4) | + ((mpls2->label_tc_s[2] & 0xf0) >> 4); + } + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + 
return -rte_errno; + } + } + + if (mpls1 != NULL && mpls2 != NULL) { + parser->key_len = 20 + 20; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [31:0] ip_sa; + * logic [15:0] tcp_sport; + * } Hash_Key_Ip_Tcp_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_IPV4 + * RTE_FLOW_ITEM_TYPE_TCP + */ +static int +ipn3ke_pattern_ip_tcp(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_ipv4 *ipv4 = NULL; + const struct rte_flow_item_tcp *tcp = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = item->spec; + + rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4); + break; + + case RTE_FLOW_ITEM_TYPE_TCP: + tcp = item->spec; + + rte_memcpy(&parser->key[4], &tcp->hdr.src_port, 2); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (ipv4 != NULL && tcp != NULL) { + parser->key_len = 32 + 16; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [31:0] ip_sa; + * logic [15:0] udp_sport; + * } Hash_Key_Ip_Udp_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_IPV4 + * RTE_FLOW_ITEM_TYPE_UDP + */ +static int +ipn3ke_pattern_ip_udp(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_ipv4 *ipv4 = NULL; + const struct rte_flow_item_udp *udp = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = item->spec; + + rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4); + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp = item->spec; + + rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (ipv4 != NULL && udp != NULL) { + parser->key_len = 32 + 16; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [31:0] ip_sa; + * logic [15:0] udp_sport; + * logic [23:0] vsid; + * } Hash_Key_Ip_Nvgre_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_IPV4 + * RTE_FLOW_ITEM_TYPE_UDP + * RTE_FLOW_ITEM_TYPE_NVGRE + */ +static int +ipn3ke_pattern_ip_nvgre(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_nvgre *nvgre = NULL; + const struct rte_flow_item_ipv4 *ipv4 = NULL; + const struct rte_flow_item_udp *udp = NULL; + const struct 
rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = item->spec; + + rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4); + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp = item->spec; + + rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2); + break; + + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre = item->spec; + + rte_memcpy(&parser->key[6], nvgre->tni, 3); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (ipv4 != NULL && udp != NULL && nvgre != NULL) { + parser->key_len = 32 + 16 + 24; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed{ + * logic [23:0] vxlan_vni; + * logic [31:0] ip_sa; + * logic [15:0] udp_sport; + * } Hash_Key_Vxlan_Ip_Udp_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_VXLAN + * RTE_FLOW_ITEM_TYPE_IPV4 + * RTE_FLOW_ITEM_TYPE_UDP + */ +static int +ipn3ke_pattern_vxlan_ip_udp(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_vxlan *vxlan = NULL; + const struct rte_flow_item_ipv4 *ipv4 = NULL; + const struct rte_flow_item_udp *udp = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan = item->spec; + + rte_memcpy(&parser->key[0], vxlan->vni, 3); + break; + + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = item->spec; + + rte_memcpy(&parser->key[3], &ipv4->hdr.src_addr, 4); + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp = item->spec; + + rte_memcpy(&parser->key[7], &udp->hdr.src_port, 2); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (vxlan != NULL && ipv4 != NULL && udp != NULL) { + parser->key_len = 24 + 32 + 16; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +static const struct ipn3ke_flow_pattern ipn3ke_supported_patterns[] = { + [IPN3KE_HASH_KEY_VXLAN] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH), + .filter = ipn3ke_pattern_vxlan, + }, + + [IPN3KE_HASH_KEY_MAC] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_ETH), + .filter = ipn3ke_pattern_mac, + }, + + [IPN3KE_HASH_KEY_QINQ] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN), + .filter = ipn3ke_pattern_qinq, + }, + + [IPN3KE_HASH_KEY_MPLS] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_MPLS, + RTE_FLOW_ITEM_TYPE_MPLS), + .filter = ipn3ke_pattern_mpls, + }, + + [IPN3KE_HASH_KEY_IP_TCP] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP), + .filter = ipn3ke_pattern_ip_tcp, + }, + + [IPN3KE_HASH_KEY_IP_UDP] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP), + 
.filter = ipn3ke_pattern_ip_udp, + }, + + [IPN3KE_HASH_KEY_IP_NVGRE] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_NVGRE), + .filter = ipn3ke_pattern_ip_nvgre, + }, + + [IPN3KE_HASH_KEY_VXLAN_IP_UDP] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP), + .filter = ipn3ke_pattern_vxlan_ip_udp, + }, +}; + +static int +ipn3ke_flow_convert_attributes(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (!attr) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, + "NULL attribute."); + return -rte_errno; + } + + if (attr->group) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "groups are not supported"); + return -rte_errno; + } + + if (attr->egress) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, + "egress is not supported"); + return -rte_errno; + } + + if (attr->transfer) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, + "transfer is not supported"); + return -rte_errno; + } + + if (!attr->ingress) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "only ingress is supported"); + return -rte_errno; + } + + return 0; +} + +static int +ipn3ke_flow_convert_actions(const struct rte_flow_action actions[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_action_mark *mark = NULL; + + if (!actions) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, + "NULL action."); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + + case RTE_FLOW_ACTION_TYPE_MARK: + if (mark) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "duplicated mark"); + return -rte_errno; + } + + mark = actions->conf; + if (!mark) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "mark must be defined"); + return -rte_errno; + } else if (mark->id > IPN3KE_FLOW_RESULT_UID_MAX) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "mark id is out of range"); + return -rte_errno; + } + + parser->mark = 1; + parser->mark_id = mark->id; + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + parser->drop = 1; + break; + + default: + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "invalid action"); + return -rte_errno; + } + } + + if (!parser->drop && !parser->mark) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "no valid actions"); + return -rte_errno; + } + + return 0; +} + +static bool +ipn3ke_match_pattern(const enum rte_flow_item_type *patterns, + const struct rte_flow_item *input) +{ + const struct rte_flow_item *item = input; + + while ((*patterns == item->type) && + (*patterns != RTE_FLOW_ITEM_TYPE_END)) { + patterns++; + item++; + } + + return (*patterns == RTE_FLOW_ITEM_TYPE_END && + item->type == RTE_FLOW_ITEM_TYPE_END); +} + +static pattern_filter_t +ipn3ke_find_filter_func(const struct rte_flow_item *input, + uint32_t *idx) +{ + pattern_filter_t filter = NULL; + uint32_t i; + + for (i = 0; i < RTE_DIM(ipn3ke_supported_patterns); i++) { + if (ipn3ke_match_pattern(ipn3ke_supported_patterns[i].items, + input)) { + filter = ipn3ke_supported_patterns[i].filter; + *idx = i; + break; + } + } + + 
return filter; +} + +static int +ipn3ke_flow_convert_items(const struct rte_flow_item items[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + pattern_filter_t filter = NULL; + uint32_t idx; + + if (!items) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, + "NULL pattern."); + return -rte_errno; + } + + filter = ipn3ke_find_filter_func(items, &idx); + + if (!filter) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + items, + "Unsupported pattern"); + return -rte_errno; + } + + parser->key_type = idx; + + return filter(items, error, parser); +} + +/* Put the least @nbits of @data into @offset of @dst bits stream, and + * the @offset starts from MSB to LSB in each byte. + * + * MSB LSB + * +------+------+------+------+ + * | | | | | + * +------+------+------+------+ + * ^ ^ + * |<- data: nbits ->| + * | + * offset + */ +static void +copy_data_bits(uint8_t *dst, uint64_t data, + uint32_t offset, uint8_t nbits) +{ + uint8_t set, *p = &dst[offset / BITS_PER_BYTE]; + uint8_t bits_to_set = BITS_PER_BYTE - (offset % BITS_PER_BYTE); + uint8_t mask_to_set = 0xff >> (offset % BITS_PER_BYTE); + uint32_t size = offset + nbits; + + if (nbits > (sizeof(data) * BITS_PER_BYTE)) { + IPN3KE_AFU_PMD_ERR("nbits is out of range"); + return; + } + + while (nbits - bits_to_set >= 0) { + set = data >> (nbits - bits_to_set); + + *p &= ~mask_to_set; + *p |= (set & mask_to_set); + + nbits -= bits_to_set; + bits_to_set = BITS_PER_BYTE; + mask_to_set = 0xff; + p++; + } + + if (nbits) { + uint8_t shift = BITS_PER_BYTE - (size % BITS_PER_BYTE); + + set = data << shift; + mask_to_set = 0xff << shift; + + *p &= ~mask_to_set; + *p |= (set & mask_to_set); + } +} + +static void +ipn3ke_flow_key_generation(struct ipn3ke_flow_parse *parser, + struct rte_flow *flow) +{ + uint32_t i, shift_bytes, len_in_bytes, offset; + uint64_t key; + uint8_t *dst; + + dst = flow->rule.key; + + copy_data_bits(dst, + parser->key_type, + IPN3KE_FLOW_KEY_ID_OFFSET, + IPN3KE_FLOW_KEY_ID_BITS); + + /* The MSb of key is filled to 0 when it is less than + * IPN3KE_FLOW_KEY_DATA_BITS bit. And the parsed key data is + * save as MSB byte first in the array, it needs to move + * the bits before formatting them. 
+ */ + key = 0; + shift_bytes = 0; + len_in_bytes = BITS_TO_BYTES(parser->key_len); + offset = (IPN3KE_FLOW_KEY_DATA_OFFSET + + IPN3KE_FLOW_KEY_DATA_BITS - + parser->key_len); + + for (i = 0; i < len_in_bytes; i++) { + key = (key << 8) | parser->key[i]; + + if (++shift_bytes == sizeof(key)) { + shift_bytes = 0; + + copy_data_bits(dst, key, offset, + sizeof(key) * BITS_PER_BYTE); + offset += sizeof(key) * BITS_PER_BYTE; + key = 0; + } + } + + if (shift_bytes != 0) { + uint32_t rem_bits; + + rem_bits = parser->key_len % (sizeof(key) * BITS_PER_BYTE); + key >>= (shift_bytes * 8 - rem_bits); + copy_data_bits(dst, key, offset, rem_bits); + } +} + +static void +ipn3ke_flow_result_generation(struct ipn3ke_flow_parse *parser, + struct rte_flow *flow) +{ + uint8_t *dst; + + if (parser->drop) + return; + + dst = flow->rule.result; + + copy_data_bits(dst, + 1, + IPN3KE_FLOW_RESULT_ACL_OFFSET, + IPN3KE_FLOW_RESULT_ACL_BITS); + + copy_data_bits(dst, + parser->mark_id, + IPN3KE_FLOW_RESULT_UID_OFFSET, + IPN3KE_FLOW_RESULT_UID_BITS); +} + +#define MHL_COMMAND_TIME_COUNT 0xFFFF +#define MHL_COMMAND_TIME_INTERVAL_US 10 + +static int +ipn3ke_flow_hw_update(struct ipn3ke_hw *hw, + struct rte_flow *flow, uint32_t is_add) +{ + uint32_t *pdata = NULL; + uint32_t data; + uint32_t time_out = MHL_COMMAND_TIME_COUNT; + uint32_t i; + + IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start\n"); + + pdata = (uint32_t *)flow->rule.key; + IPN3KE_AFU_PMD_DEBUG(" - key :"); + + for (i = 0; i < RTE_DIM(flow->rule.key); i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.key[i]); + + for (i = 0; i < 4; i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", ipn3ke_swap32(pdata[3 - i])); + IPN3KE_AFU_PMD_DEBUG("\n"); + + pdata = (uint32_t *)flow->rule.result; + IPN3KE_AFU_PMD_DEBUG(" - result:"); + + for (i = 0; i < RTE_DIM(flow->rule.result); i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.result[i]); + + for (i = 0; i < 1; i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", pdata[i]); + IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump end\n"); + + pdata = (uint32_t *)flow->rule.key; + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_KEY_0, + 0, + ipn3ke_swap32(pdata[3]), + IPN3KE_CLF_MHL_KEY_MASK); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_KEY_1, + 0, + ipn3ke_swap32(pdata[2]), + IPN3KE_CLF_MHL_KEY_MASK); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_KEY_2, + 0, + ipn3ke_swap32(pdata[1]), + IPN3KE_CLF_MHL_KEY_MASK); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_KEY_3, + 0, + ipn3ke_swap32(pdata[0]), + IPN3KE_CLF_MHL_KEY_MASK); + + pdata = (uint32_t *)flow->rule.result; + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_RES, + 0, + ipn3ke_swap32(pdata[0]), + IPN3KE_CLF_MHL_RES_MASK); + + /* insert/delete the key and result */ + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + 0x80000000); + time_out = MHL_COMMAND_TIME_COUNT; + while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) && + (time_out > 0)) { + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + 0x80000000); + time_out--; + rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US); + } + if (!time_out) + return -1; + if (is_add) + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + IPN3KE_CLF_MHL_MGMT_CTRL_INSERT, + 0x3); + else + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + IPN3KE_CLF_MHL_MGMT_CTRL_DELETE, + 0x3); + + return 0; +} + +static int +ipn3ke_flow_hw_flush(struct ipn3ke_hw *hw) +{ + uint32_t data; + uint32_t time_out = MHL_COMMAND_TIME_COUNT; + + /* flush the MHL lookup table */ + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + 
IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + 0x80000000); + time_out = MHL_COMMAND_TIME_COUNT; + while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) && + (time_out > 0)) { + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + 0x80000000); + time_out--; + rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US); + } + if (!time_out) + return -1; + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH, + 0x3); + + return 0; +} + +static void +ipn3ke_flow_convert_finalise(struct ipn3ke_hw *hw, + struct ipn3ke_flow_parse *parser, struct rte_flow *flow) +{ + ipn3ke_flow_key_generation(parser, flow); + ipn3ke_flow_result_generation(parser, flow); + ipn3ke_flow_hw_update(hw, flow, 1); +} + +static int +ipn3ke_flow_convert(const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], struct rte_flow_error *error, + struct ipn3ke_flow_parse *parser) +{ + int ret; + + ret = ipn3ke_flow_convert_attributes(attr, error); + if (ret) + return ret; + + ret = ipn3ke_flow_convert_actions(actions, error, parser); + if (ret) + return ret; + + ret = ipn3ke_flow_convert_items(items, error, parser); + if (ret) + return ret; + + return 0; +} + +static int +ipn3ke_flow_validate(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], struct rte_flow_error *error) +{ + struct ipn3ke_flow_parse parser = {0}; + return ipn3ke_flow_convert(attr, pattern, actions, error, &parser); +} + +static struct rte_flow * +ipn3ke_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], struct rte_flow_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_flow_parse parser = {0}; + struct rte_flow *flow; + int ret; + + if (hw->flow_num_entries == hw->flow_max_entries) { + rte_flow_error_set(error, + ENOBUFS, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "The flow table is full."); + return NULL; + } + + ret = ipn3ke_flow_convert(attr, pattern, actions, error, &parser); + if (ret < 0) { + rte_flow_error_set(error, + -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to create flow."); + return NULL; + } + + flow = rte_zmalloc("ipn3ke_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, + ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to allocate memory"); + return flow; + } + + ipn3ke_flow_convert_finalise(hw, &parser, flow); + + TAILQ_INSERT_TAIL(&hw->flow_list, flow, next); + + return flow; +} + +static int +ipn3ke_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, struct rte_flow_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + int ret = 0; + + ret = ipn3ke_flow_hw_update(hw, flow, 0); + if (!ret) { + TAILQ_REMOVE(&hw->flow_list, flow, next); + rte_free(flow); + } else { + rte_flow_error_set(error, + -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to destroy flow."); + } + + return ret; +} + +static int +ipn3ke_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct rte_flow *flow, *temp; + + TAILQ_FOREACH_SAFE(flow, &hw->flow_list, next, temp) { + TAILQ_REMOVE(&hw->flow_list, flow, next); + rte_free(flow); + } + + return ipn3ke_flow_hw_flush(hw); +} + +int ipn3ke_flow_init(void *dev) +{ + struct ipn3ke_hw *hw = (struct ipn3ke_hw *)dev; + uint32_t 
data; + + /* disable rx classifier bypass */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_RX_TEST, + 0, 0, 0x1); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_RX_TEST, + 0, + 0x1); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data); + + /* configure base mac address */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_HI, + 0, + 0x2457, + 0xFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_HI, + 0, + 0xFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW, + 0, + 0x9bdf1000, + 0xFFFFFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW, + 0, + 0xFFFFFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data); + + + /* configure hash lookup rules enable */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_LKUP_ENABLE, + 0, + 0xFD, + 0xFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_LKUP_ENABLE, + 0, + 0xFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data); + + + /* configure rx parse config, settings associatied with VxLAN */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_RX_PARSE_CFG, + 0, + 0x212b5, + 0x3FFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_RX_PARSE_CFG, + 0, + 0x3FFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data); + + + /* configure QinQ S-Tag */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_QINQ_STAG, + 0, + 0x88a8, + 0xFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_QINQ_STAG, + 0, + 0xFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data); + + + /* configure gen ctrl */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_GEN_CTRL, + 0, + 0x3, + 0x3); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_GEN_CTRL, + 0, + 0x1F); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data); + + + /* clear monitoring register */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_MON_0, + 0, + 0xFFFFFFFF, + 0xFFFFFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_MON_0, + 0, + 0xFFFFFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x\n", data); + + + ipn3ke_flow_hw_flush(hw); + + TAILQ_INIT(&hw->flow_list); + hw->flow_max_entries = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_EM_NUM, + 0, + 0xFFFFFFFF); + hw->flow_num_entries = 0; + + return 0; +} + +const struct rte_flow_ops ipn3ke_flow_ops = { + .validate = ipn3ke_flow_validate, + .create = ipn3ke_flow_create, + .destroy = ipn3ke_flow_destroy, + .flush = ipn3ke_flow_flush, +}; + diff --git a/drivers/net/ipn3ke/ipn3ke_flow.h b/drivers/net/ipn3ke/ipn3ke_flow.h new file mode 100644 index 0000000..ef1a61f --- /dev/null +++ b/drivers/net/ipn3ke/ipn3ke_flow.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _IPN3KE_FLOW_H_ +#define _IPN3KE_FLOW_H_ + +/** + * Expand the length to DWORD alignment with 'Unused' field. + * + * FLOW KEY: + * | Unused |Ruler id (id) | Key1 Key2 … (data) | + * |--------+---------------+--------------------| + * | 17bits | 3 bits | Total 108 bits | + * MSB ---> LSB + * + * Note: And the MSb of key data is filled to 0 when it is less + * than 108 bit. 
+ */ +#define IPN3KE_FLOW_KEY_UNUSED_BITS 17 +#define IPN3KE_FLOW_KEY_ID_BITS 3 +#define IPN3KE_FLOW_KEY_DATA_BITS 108 + +#define IPN3KE_FLOW_KEY_TOTAL_BITS \ + (IPN3KE_FLOW_KEY_UNUSED_BITS + \ + IPN3KE_FLOW_KEY_ID_BITS + \ + IPN3KE_FLOW_KEY_DATA_BITS) + +#define IPN3KE_FLOW_KEY_ID_OFFSET \ + (IPN3KE_FLOW_KEY_UNUSED_BITS) + +#define IPN3KE_FLOW_KEY_DATA_OFFSET \ + (IPN3KE_FLOW_KEY_ID_OFFSET + IPN3KE_FLOW_KEY_ID_BITS) + +/** + * Expand the length to DWORD alignment with 'Unused' field. + * + * FLOW RESULT: + * | Unused | enable (acl) | uid | + * |---------+--------------+--------------| + * | 15 bits | 1 bit | 16 bits | + * MSB ---> LSB + */ + +#define IPN3KE_FLOW_RESULT_UNUSED_BITS 15 +#define IPN3KE_FLOW_RESULT_ACL_BITS 1 +#define IPN3KE_FLOW_RESULT_UID_BITS 16 + +#define IPN3KE_FLOW_RESULT_TOTAL_BITS \ + (IPN3KE_FLOW_RESULT_UNUSED_BITS + \ + IPN3KE_FLOW_RESULT_ACL_BITS + \ + IPN3KE_FLOW_RESULT_UID_BITS) + +#define IPN3KE_FLOW_RESULT_ACL_OFFSET \ + (IPN3KE_FLOW_RESULT_UNUSED_BITS) + +#define IPN3KE_FLOW_RESULT_UID_OFFSET \ + (IPN3KE_FLOW_RESULT_ACL_OFFSET + IPN3KE_FLOW_RESULT_ACL_BITS) + +#define IPN3KE_FLOW_RESULT_UID_MAX \ + ((1UL << IPN3KE_FLOW_RESULT_UID_BITS) - 1) + +#ifndef BITS_PER_BYTE +#define BITS_PER_BYTE 8 +#endif +#define BITS_TO_BYTES(bits) \ + (((bits) + BITS_PER_BYTE - 1) / BITS_PER_BYTE) + +struct ipn3ke_flow_rule { + uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_TOTAL_BITS)]; + uint8_t result[BITS_TO_BYTES(IPN3KE_FLOW_RESULT_TOTAL_BITS)]; +}; + +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ + + struct ipn3ke_flow_rule rule; +}; + +TAILQ_HEAD(ipn3ke_flow_list, rte_flow); + +static inline uint16_t ipn3ke_swap16(uint16_t x) +{ + return ((x & 0xff) << 8) | ((x >> 8) & 0xff); +} + +static inline uint32_t ipn3ke_swap32(uint32_t x) +{ + uint32_t high, low; + uint32_t high1, low1; + + high = (x >> 16) & 0xffff; + low = x & 0xffff; + high1 = ipn3ke_swap16(low); + high1 = high1 << 16; + low1 = ipn3ke_swap16(high); + low1 = low1 & 0xffff; + + return high1 | low1; +} + +extern const struct rte_flow_ops ipn3ke_flow_ops; + +int ipn3ke_flow_init(void *dev); + +#endif /* _IPN3KE_FLOW_H_ */ diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c index 63098bf..cf3b214 100644 --- a/drivers/net/ipn3ke/ipn3ke_representor.c +++ b/drivers/net/ipn3ke/ipn3ke_representor.c @@ -21,6 +21,7 @@ #include #include "ipn3ke_rawdev_api.h" +#include "ipn3ke_flow.h" #include "ipn3ke_logs.h" #include "ipn3ke_ethdev.h" @@ -746,7 +747,7 @@ case RTE_ETH_FILTER_GENERIC: if (filter_op != RTE_ETH_FILTER_GET) return -EINVAL; - *(const void **)arg = NULL; + *(const void **)arg = &ipn3ke_flow_ops; break; default: IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported", diff --git a/drivers/net/ipn3ke/ipn3ke_tm.c b/drivers/net/ipn3ke/ipn3ke_tm.c index 8baa2fb..4bcf3aa 100644 --- a/drivers/net/ipn3ke/ipn3ke_tm.c +++ b/drivers/net/ipn3ke/ipn3ke_tm.c @@ -23,6 +23,7 @@ #include #include "ipn3ke_rawdev_api.h" +#include "ipn3ke_flow.h" #include "ipn3ke_logs.h" #include "ipn3ke_ethdev.h" diff --git a/drivers/net/ipn3ke/meson.build b/drivers/net/ipn3ke/meson.build index 3a95efc..74b4d7c 100644 --- a/drivers/net/ipn3ke/meson.build +++ b/drivers/net/ipn3ke/meson.build @@ -12,5 +12,6 @@ allow_experimental_apis = true sources += files('ipn3ke_ethdev.c', 'ipn3ke_representor.c', - 'ipn3ke_tm.c') + 'ipn3ke_tm.c', + 'ipn3ke_flow.c') deps += ['bus_ifpga', 'sched'] -- 1.8.3.1
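
A few standalone C sketches follow to illustrate the bit-level mechanics the patch relies on. None of them are part of the patch; every helper name in them is invented for illustration. First, the 24-bit QinQ hash key built by ipn3ke_pattern_qinq(), shown with example TCI values whose PCP/DEI bits are zero, so each TCI equals its VLAN ID:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t outer_tci = 0x123;	/* example outer VLAN ID 0x123 */
	uint16_t inner_tci = 0xabc;	/* example inner VLAN ID 0xabc */
	uint8_t key[3] = {0};

	/* Same packing as ipn3ke_pattern_qinq(): two 12-bit IDs into 24 bits. */
	key[0] = (outer_tci & 0xff0) >> 4;	/* outer[11:4]               */
	key[1] |= (outer_tci & 0x00f) << 4;	/* outer[3:0] in high nibble */
	key[1] |= (inner_tci & 0xf00) >> 8;	/* inner[11:8] in low nibble */
	key[2] = inner_tci & 0x0ff;		/* inner[7:0]                */

	printf("%02x %02x %02x\n", key[0], key[1], key[2]);
	/* Prints "12 3a bc": the two IDs concatenated MSB first. */
	return 0;
}

This matches the Hash_Key_QinQ_t RTL struct quoted in the code comment: outer_vlan_id fills the top 12 bits of the key, inner_vlan_id the bottom 12.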
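The exact-match walk in ipn3ke_match_pattern() deserves a second look: a pattern is accepted only when the user's items and the supported template reach END together, so a strict prefix of a supported pattern (for example a single VLAN item against the QinQ template) does not match. Below is a minimal restatement of that loop over int arrays, with 0 standing in for RTE_FLOW_ITEM_TYPE_END; match() and the arrays are illustrative only:

#include <stdbool.h>
#include <stdio.h>

static bool match(const int *tmpl, const int *input)
{
	/* Walk both sequences while they agree and neither has ended. */
	while (*tmpl == *input && *tmpl != 0) {
		tmpl++;
		input++;
	}
	/* Accept only if both terminate at the same position. */
	return *tmpl == 0 && *input == 0;
}

int main(void)
{
	int qinq[] = {2, 2, 0};		/* VLAN, VLAN, END */
	int ok[]   = {2, 2, 0};		/* matches exactly */
	int bad[]  = {2, 0};		/* single VLAN: a prefix, rejected */

	printf("%d %d\n", match(qinq, ok), match(qinq, bad));	/* prints "1 0" */
	return 0;
}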
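copy_data_bits() fills the key MSB first: bit offset 0 is the most significant bit of dst[0], exactly as the diagram in its comment describes. The harness below restates the placement bit by bit (put_bits() is an invented reference version, not the driver function) and shows where the 3-bit key type lands given the FLOW KEY layout of 17 unused bits, 3 ID bits and 108 data bits (128 bits total):

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/* Write the low `nbits` of `data` at bit offset `offset`, counting
 * from the MSB of dst[0] -- the same convention as copy_data_bits().
 */
static void
put_bits(uint8_t *dst, uint64_t data, uint32_t offset, uint8_t nbits)
{
	uint32_t end = offset + nbits;
	uint32_t i;

	for (i = offset; i < end; i++) {
		uint32_t bit = (uint32_t)(data >> (end - 1 - i)) & 1;
		uint32_t byte = i / BITS_PER_BYTE;
		uint32_t shift = BITS_PER_BYTE - 1 - (i % BITS_PER_BYTE);

		dst[byte] &= (uint8_t)~(1u << shift);
		dst[byte] |= (uint8_t)(bit << shift);
	}
}

int main(void)
{
	uint8_t key[16] = {0};	/* 128-bit flow key */

	/* Key type 5 (101b) at IPN3KE_FLOW_KEY_ID_OFFSET = 17, width 3. */
	put_bits(key, 5, 17, 3);
	printf("%02x %02x %02x\n", key[0], key[1], key[2]);
	/* Prints "00 00 50": 101b occupies bit offsets 17..19, i.e. bits
	 * 6 and 4 of key[2], the same bytes copy_data_bits() produces.
	 */
	return 0;
}

Working copy_data_bits() by hand with the same arguments gives the identical result (the while loop never runs since nbits < bits_to_set, and the tail writes 0x50 into dst[2]), which is a convenient sanity check on the MSB-first convention.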
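ipn3ke_swap32() in the new header is a plain 32-bit byte swap composed of two 16-bit swaps; the driver uses it to feed the key and result words to the MHL registers in the byte order the hardware expects. A standalone cross-check against the usual shift/mask form (swap32_ref() is an illustrative name):

#include <stdio.h>
#include <stdint.h>

/* Reference byte swap: same result as ipn3ke_swap32(). */
static uint32_t swap32_ref(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) << 8)  |
	       ((x & 0x00ff0000u) >> 8)  |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	/* ipn3ke_swap32(0x12345678) also yields 0x78563412. */
	printf("%08x\n", swap32_ref(0x12345678u));
	return 0;
}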
NULL, + "only ingress is supported"); + return -rte_errno; + } + + return 0; +} + +static int +ipn3ke_flow_convert_actions(const struct rte_flow_action actions[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_action_mark *mark = NULL; + + if (!actions) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, + "NULL action."); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + + case RTE_FLOW_ACTION_TYPE_MARK: + if (mark) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "duplicated mark"); + return -rte_errno; + } + + mark = actions->conf; + if (!mark) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "mark must be defined"); + return -rte_errno; + } else if (mark->id > IPN3KE_FLOW_RESULT_UID_MAX) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "mark id is out of range"); + return -rte_errno; + } + + parser->mark = 1; + parser->mark_id = mark->id; + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + parser->drop = 1; + break; + + default: + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "invalid action"); + return -rte_errno; + } + } + + if (!parser->drop && !parser->mark) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "no valid actions"); + return -rte_errno; + } + + return 0; +} + +static bool +ipn3ke_match_pattern(const enum rte_flow_item_type *patterns, + const struct rte_flow_item *input) +{ + const struct rte_flow_item *item = input; + + while ((*patterns == item->type) && + (*patterns != RTE_FLOW_ITEM_TYPE_END)) { + patterns++; + item++; + } + + return (*patterns == RTE_FLOW_ITEM_TYPE_END && + item->type == RTE_FLOW_ITEM_TYPE_END); +} + +static pattern_filter_t +ipn3ke_find_filter_func(const struct rte_flow_item *input, + uint32_t *idx) +{ + pattern_filter_t filter = NULL; + uint32_t i; + + for (i = 0; i < RTE_DIM(ipn3ke_supported_patterns); i++) { + if (ipn3ke_match_pattern(ipn3ke_supported_patterns[i].items, + input)) { + filter = ipn3ke_supported_patterns[i].filter; + *idx = i; + break; + } + } + + return filter; +} + +static int +ipn3ke_flow_convert_items(const struct rte_flow_item items[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + pattern_filter_t filter = NULL; + uint32_t idx; + + if (!items) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, + "NULL pattern."); + return -rte_errno; + } + + filter = ipn3ke_find_filter_func(items, &idx); + + if (!filter) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + items, + "Unsupported pattern"); + return -rte_errno; + } + + parser->key_type = idx; + + return filter(items, error, parser); +} + +/* Put the least @nbits of @data into @offset of @dst bits stream, and + * the @offset starts from MSB to LSB in each byte. 
+/* Put the least significant @nbits of @data into the @dst bit stream at
+ * bit position @offset; @offset counts from the MSB of dst[0] towards
+ * the LSB.
+ *
+ * MSB                                LSB
+ *     +------+------+------+------+
+ *     |      |      |      |      |
+ *     +------+------+------+------+
+ *        ^                     ^
+ *        |<-   data: nbits   ->|
+ *        |
+ *      offset
+ */
+static void
+copy_data_bits(uint8_t *dst, uint64_t data,
+    uint32_t offset, uint8_t nbits)
+{
+    uint8_t set, *p = &dst[offset / BITS_PER_BYTE];
+    uint8_t bits_to_set = BITS_PER_BYTE - (offset % BITS_PER_BYTE);
+    uint8_t mask_to_set = 0xff >> (offset % BITS_PER_BYTE);
+    uint32_t size = offset + nbits;
+
+    if (nbits > (sizeof(data) * BITS_PER_BYTE)) {
+        IPN3KE_AFU_PMD_ERR("nbits is out of range");
+        return;
+    }
+
+    while (nbits - bits_to_set >= 0) {
+        set = data >> (nbits - bits_to_set);
+
+        *p &= ~mask_to_set;
+        *p |= (set & mask_to_set);
+
+        nbits -= bits_to_set;
+        bits_to_set = BITS_PER_BYTE;
+        mask_to_set = 0xff;
+        p++;
+    }
+
+    if (nbits) {
+        uint8_t shift = BITS_PER_BYTE - (size % BITS_PER_BYTE);
+
+        set = data << shift;
+        mask_to_set = 0xff << shift;
+
+        *p &= ~mask_to_set;
+        *p |= (set & mask_to_set);
+    }
+}
+
+static void
+ipn3ke_flow_key_generation(struct ipn3ke_flow_parse *parser,
+    struct rte_flow *flow)
+{
+    uint32_t i, shift_bytes, len_in_bytes, offset;
+    uint64_t key;
+    uint8_t *dst;
+
+    dst = flow->rule.key;
+
+    copy_data_bits(dst,
+        parser->key_type,
+        IPN3KE_FLOW_KEY_ID_OFFSET,
+        IPN3KE_FLOW_KEY_ID_BITS);
+
+    /* The MSBs of the key are zero-filled when the parsed key is
+     * shorter than IPN3KE_FLOW_KEY_DATA_BITS. The parsed key data is
+     * saved MSB-byte first in the array, so the bits must be shifted
+     * into place before being formatted.
+     */
+    key = 0;
+    shift_bytes = 0;
+    len_in_bytes = BITS_TO_BYTES(parser->key_len);
+    offset = (IPN3KE_FLOW_KEY_DATA_OFFSET +
+        IPN3KE_FLOW_KEY_DATA_BITS -
+        parser->key_len);
+
+    for (i = 0; i < len_in_bytes; i++) {
+        key = (key << 8) | parser->key[i];
+
+        if (++shift_bytes == sizeof(key)) {
+            shift_bytes = 0;
+
+            copy_data_bits(dst, key, offset,
+                sizeof(key) * BITS_PER_BYTE);
+            offset += sizeof(key) * BITS_PER_BYTE;
+            key = 0;
+        }
+    }
+
+    if (shift_bytes != 0) {
+        uint32_t rem_bits;
+
+        rem_bits = parser->key_len % (sizeof(key) * BITS_PER_BYTE);
+        key >>= (shift_bytes * 8 - rem_bits);
+        copy_data_bits(dst, key, offset, rem_bits);
+    }
+}
+
+static void
+ipn3ke_flow_result_generation(struct ipn3ke_flow_parse *parser,
+    struct rte_flow *flow)
+{
+    uint8_t *dst;
+
+    if (parser->drop)
+        return;
+
+    dst = flow->rule.result;
+
+    copy_data_bits(dst,
+        1,
+        IPN3KE_FLOW_RESULT_ACL_OFFSET,
+        IPN3KE_FLOW_RESULT_ACL_BITS);
+
+    copy_data_bits(dst,
+        parser->mark_id,
+        IPN3KE_FLOW_RESULT_UID_OFFSET,
+        IPN3KE_FLOW_RESULT_UID_BITS);
+}
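To make the bit packing concrete, the standalone harness below restates
copy_data_bits() verbatim so it compiles outside the driver, then packs a
3-bit id at bit offset 17 followed by 16 bits of data at offset 20, mirroring
the key layout above. The expected bytes are worked out in the comments
(illustrative sketch, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_BYTE 8

    /* Same logic as the driver's copy_data_bits() above. */
    static void
    copy_data_bits(uint8_t *dst, uint64_t data, uint32_t offset, uint8_t nbits)
    {
        uint8_t set, *p = &dst[offset / BITS_PER_BYTE];
        uint8_t bits_to_set = BITS_PER_BYTE - (offset % BITS_PER_BYTE);
        uint8_t mask_to_set = 0xff >> (offset % BITS_PER_BYTE);
        uint32_t size = offset + nbits;

        if (nbits > (sizeof(data) * BITS_PER_BYTE))
            return;

        while (nbits - bits_to_set >= 0) {
            set = data >> (nbits - bits_to_set);
            *p &= ~mask_to_set;
            *p |= (set & mask_to_set);
            nbits -= bits_to_set;
            bits_to_set = BITS_PER_BYTE;
            mask_to_set = 0xff;
            p++;
        }

        if (nbits) {
            uint8_t shift = BITS_PER_BYTE - (size % BITS_PER_BYTE);
            set = data << shift;
            mask_to_set = 0xff << shift;
            *p &= ~mask_to_set;
            *p |= (set & mask_to_set);
        }
    }

    int main(void)
    {
        uint8_t buf[16] = { 0 };
        unsigned int i;

        copy_data_bits(buf, 0x5, 17, 3);     /* id 0b101 -> buf[2] = 0x50 */
        copy_data_bits(buf, 0xabcd, 20, 16); /* buf[2..4] = 0x5a 0xbc 0xd0 */

        for (i = 0; i < 5; i++)
            printf("%02x ", buf[i]);         /* prints: 00 00 5a bc d0 */
        printf("\n");
        return 0;
    }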
+#define MHL_COMMAND_TIME_COUNT       0xFFFF
+#define MHL_COMMAND_TIME_INTERVAL_US 10
+
+static int
+ipn3ke_flow_hw_update(struct ipn3ke_hw *hw,
+    struct rte_flow *flow, uint32_t is_add)
+{
+    uint32_t *pdata = NULL;
+    uint32_t data;
+    uint32_t time_out = MHL_COMMAND_TIME_COUNT;
+    uint32_t i;
+
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start\n");
+
+    pdata = (uint32_t *)flow->rule.key;
+    IPN3KE_AFU_PMD_DEBUG(" - key   :");
+
+    for (i = 0; i < RTE_DIM(flow->rule.key); i++)
+        IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.key[i]);
+
+    for (i = 0; i < 4; i++)
+        IPN3KE_AFU_PMD_DEBUG(" %02x", ipn3ke_swap32(pdata[3 - i]));
+    IPN3KE_AFU_PMD_DEBUG("\n");
+
+    pdata = (uint32_t *)flow->rule.result;
+    IPN3KE_AFU_PMD_DEBUG(" - result:");
+
+    for (i = 0; i < RTE_DIM(flow->rule.result); i++)
+        IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.result[i]);
+
+    for (i = 0; i < 1; i++)
+        IPN3KE_AFU_PMD_DEBUG(" %02x", pdata[i]);
+    IPN3KE_AFU_PMD_DEBUG("\n");
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump end\n");
+
+    pdata = (uint32_t *)flow->rule.key;
+
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_MHL_KEY_0,
+        0,
+        ipn3ke_swap32(pdata[3]),
+        IPN3KE_CLF_MHL_KEY_MASK);
+
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_MHL_KEY_1,
+        0,
+        ipn3ke_swap32(pdata[2]),
+        IPN3KE_CLF_MHL_KEY_MASK);
+
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_MHL_KEY_2,
+        0,
+        ipn3ke_swap32(pdata[1]),
+        IPN3KE_CLF_MHL_KEY_MASK);
+
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_MHL_KEY_3,
+        0,
+        ipn3ke_swap32(pdata[0]),
+        IPN3KE_CLF_MHL_KEY_MASK);
+
+    pdata = (uint32_t *)flow->rule.result;
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_MHL_RES,
+        0,
+        ipn3ke_swap32(pdata[0]),
+        IPN3KE_CLF_MHL_RES_MASK);
+
+    /* insert/delete the key and result */
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_MHL_MGMT_CTRL,
+        0,
+        0x80000000);
+    time_out = MHL_COMMAND_TIME_COUNT;
+    while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
+           (time_out > 0)) {
+        data = IPN3KE_MASK_READ_REG(hw,
+            IPN3KE_CLF_MHL_MGMT_CTRL,
+            0,
+            0x80000000);
+        time_out--;
+        rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
+    }
+    if (!time_out)
+        return -1;
+
+    if (is_add)
+        IPN3KE_MASK_WRITE_REG(hw,
+            IPN3KE_CLF_MHL_MGMT_CTRL,
+            0,
+            IPN3KE_CLF_MHL_MGMT_CTRL_INSERT,
+            0x3);
+    else
+        IPN3KE_MASK_WRITE_REG(hw,
+            IPN3KE_CLF_MHL_MGMT_CTRL,
+            0,
+            IPN3KE_CLF_MHL_MGMT_CTRL_DELETE,
+            0x3);
+
+    return 0;
+}
+
+static int
+ipn3ke_flow_hw_flush(struct ipn3ke_hw *hw)
+{
+    uint32_t data;
+    uint32_t time_out = MHL_COMMAND_TIME_COUNT;
+
+    /* flush the MHL lookup table */
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_MHL_MGMT_CTRL,
+        0,
+        0x80000000);
+    time_out = MHL_COMMAND_TIME_COUNT;
+    while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
+           (time_out > 0)) {
+        data = IPN3KE_MASK_READ_REG(hw,
+            IPN3KE_CLF_MHL_MGMT_CTRL,
+            0,
+            0x80000000);
+        time_out--;
+        rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
+    }
+    if (!time_out)
+        return -1;
+
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_MHL_MGMT_CTRL,
+        0,
+        IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH,
+        0x3);
+
+    return 0;
+}
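The insert, delete and flush paths above all open with the same busy-wait on
the MGMT_CTRL register. A possible consolidation, sketched here with the same
macros the patch defines (not part of the patch itself):

    /* Sketch: poll the MGMT_CTRL busy bit (mask 0x80000000) until the
     * hardware goes idle, waiting MHL_COMMAND_TIME_INTERVAL_US between
     * reads, for at most MHL_COMMAND_TIME_COUNT attempts. */
    static int
    ipn3ke_mhl_wait_idle(struct ipn3ke_hw *hw)
    {
        uint32_t time_out = MHL_COMMAND_TIME_COUNT;
        uint32_t data;

        do {
            data = IPN3KE_MASK_READ_REG(hw,
                IPN3KE_CLF_MHL_MGMT_CTRL,
                0,
                0x80000000);
            if (!IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY))
                return 0;
            rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
        } while (--time_out > 0);

        return -1; /* hardware stayed busy for the whole timeout */
    }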
"The flow table is full."); + return NULL; + } + + ret = ipn3ke_flow_convert(attr, pattern, actions, error, &parser); + if (ret < 0) { + rte_flow_error_set(error, + -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to create flow."); + return NULL; + } + + flow = rte_zmalloc("ipn3ke_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, + ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to allocate memory"); + return flow; + } + + ipn3ke_flow_convert_finalise(hw, &parser, flow); + + TAILQ_INSERT_TAIL(&hw->flow_list, flow, next); + + return flow; +} + +static int +ipn3ke_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, struct rte_flow_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + int ret = 0; + + ret = ipn3ke_flow_hw_update(hw, flow, 0); + if (!ret) { + TAILQ_REMOVE(&hw->flow_list, flow, next); + rte_free(flow); + } else { + rte_flow_error_set(error, + -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to destroy flow."); + } + + return ret; +} + +static int +ipn3ke_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct rte_flow *flow, *temp; + + TAILQ_FOREACH_SAFE(flow, &hw->flow_list, next, temp) { + TAILQ_REMOVE(&hw->flow_list, flow, next); + rte_free(flow); + } + + return ipn3ke_flow_hw_flush(hw); +} + +int ipn3ke_flow_init(void *dev) +{ + struct ipn3ke_hw *hw = (struct ipn3ke_hw *)dev; + uint32_t data; + + /* disable rx classifier bypass */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_RX_TEST, + 0, 0, 0x1); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_RX_TEST, + 0, + 0x1); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data); + + /* configure base mac address */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_HI, + 0, + 0x2457, + 0xFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_HI, + 0, + 0xFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW, + 0, + 0x9bdf1000, + 0xFFFFFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW, + 0, + 0xFFFFFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data); + + + /* configure hash lookup rules enable */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_LKUP_ENABLE, + 0, + 0xFD, + 0xFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_LKUP_ENABLE, + 0, + 0xFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data); + + + /* configure rx parse config, settings associatied with VxLAN */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_RX_PARSE_CFG, + 0, + 0x212b5, + 0x3FFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_RX_PARSE_CFG, + 0, + 0x3FFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data); + + + /* configure QinQ S-Tag */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_QINQ_STAG, + 0, + 0x88a8, + 0xFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_QINQ_STAG, + 0, + 0xFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data); + + + /* configure gen ctrl */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_GEN_CTRL, + 0, + 0x3, + 0x3); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_GEN_CTRL, + 0, + 0x1F); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data); + + + /* clear monitoring register */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_MON_0, + 0, + 0xFFFFFFFF, + 0xFFFFFFFF); + + data = 
+int ipn3ke_flow_init(void *dev)
+{
+    struct ipn3ke_hw *hw = (struct ipn3ke_hw *)dev;
+    uint32_t data;
+
+    /* disable rx classifier bypass */
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_RX_TEST,
+        0, 0, 0x1);
+
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_RX_TEST,
+        0,
+        0x1);
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data);
+
+    /* configure base mac address */
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
+        0,
+        0x2457,
+        0xFFFF);
+
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
+        0,
+        0xFFFF);
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data);
+
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
+        0,
+        0x9bdf1000,
+        0xFFFFFFFF);
+
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
+        0,
+        0xFFFFFFFF);
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data);
+
+    /* configure hash lookup rules enable */
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_LKUP_ENABLE,
+        0,
+        0xFD,
+        0xFF);
+
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_LKUP_ENABLE,
+        0,
+        0xFF);
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data);
+
+    /* configure rx parse config, settings associated with VxLAN */
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_RX_PARSE_CFG,
+        0,
+        0x212b5,
+        0x3FFFF);
+
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_RX_PARSE_CFG,
+        0,
+        0x3FFFF);
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data);
+
+    /* configure QinQ S-Tag */
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_QINQ_STAG,
+        0,
+        0x88a8,
+        0xFFFF);
+
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_QINQ_STAG,
+        0,
+        0xFFFF);
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data);
+
+    /* configure gen ctrl */
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_MHL_GEN_CTRL,
+        0,
+        0x3,
+        0x3);
+
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_MHL_GEN_CTRL,
+        0,
+        0x1F);
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data);
+
+    /* clear monitoring register */
+    IPN3KE_MASK_WRITE_REG(hw,
+        IPN3KE_CLF_MHL_MON_0,
+        0,
+        0xFFFFFFFF,
+        0xFFFFFFFF);
+
+    data = 0;
+    data = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_MHL_MON_0,
+        0,
+        0xFFFFFFFF);
+    IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x\n", data);
+
+    ipn3ke_flow_hw_flush(hw);
+
+    TAILQ_INIT(&hw->flow_list);
+    hw->flow_max_entries = IPN3KE_MASK_READ_REG(hw,
+        IPN3KE_CLF_EM_NUM,
+        0,
+        0xFFFFFFFF);
+    hw->flow_num_entries = 0;
+
+    return 0;
+}
+
+const struct rte_flow_ops ipn3ke_flow_ops = {
+    .validate = ipn3ke_flow_validate,
+    .create = ipn3ke_flow_create,
+    .destroy = ipn3ke_flow_destroy,
+    .flush = ipn3ke_flow_flush,
+};
diff --git a/drivers/net/ipn3ke/ipn3ke_flow.h b/drivers/net/ipn3ke/ipn3ke_flow.h
new file mode 100644
index 0000000..ef1a61f
--- /dev/null
+++ b/drivers/net/ipn3ke/ipn3ke_flow.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _IPN3KE_FLOW_H_
+#define _IPN3KE_FLOW_H_
+
+/**
+ * Expand the length to DWORD alignment with the 'Unused' field.
+ *
+ * FLOW KEY:
+ *  | Unused  | Rule id (id) | Key1 Key2 … (data) |
+ *  |---------+--------------+--------------------|
+ *  | 17 bits |    3 bits    |   Total 108 bits   |
+ *  MSB                 --->                   LSB
+ *
+ * Note: the MSBs of the key data are zero-filled when the key is
+ * shorter than 108 bits.
+ */
+#define IPN3KE_FLOW_KEY_UNUSED_BITS 17
+#define IPN3KE_FLOW_KEY_ID_BITS     3
+#define IPN3KE_FLOW_KEY_DATA_BITS   108
+
+#define IPN3KE_FLOW_KEY_TOTAL_BITS \
+    (IPN3KE_FLOW_KEY_UNUSED_BITS + \
+    IPN3KE_FLOW_KEY_ID_BITS + \
+    IPN3KE_FLOW_KEY_DATA_BITS)
+
+#define IPN3KE_FLOW_KEY_ID_OFFSET \
+    (IPN3KE_FLOW_KEY_UNUSED_BITS)
+
+#define IPN3KE_FLOW_KEY_DATA_OFFSET \
+    (IPN3KE_FLOW_KEY_ID_OFFSET + IPN3KE_FLOW_KEY_ID_BITS)
+
+/**
+ * Expand the length to DWORD alignment with the 'Unused' field.
+ *
+ * FLOW RESULT:
+ *  | Unused  | enable (acl) |     uid      |
+ *  |---------+--------------+--------------|
+ *  | 15 bits |    1 bit     |   16 bits    |
+ *  MSB                --->              LSB
+ */
+#define IPN3KE_FLOW_RESULT_UNUSED_BITS 15
+#define IPN3KE_FLOW_RESULT_ACL_BITS    1
+#define IPN3KE_FLOW_RESULT_UID_BITS    16
+
+#define IPN3KE_FLOW_RESULT_TOTAL_BITS \
+    (IPN3KE_FLOW_RESULT_UNUSED_BITS + \
+    IPN3KE_FLOW_RESULT_ACL_BITS + \
+    IPN3KE_FLOW_RESULT_UID_BITS)
+
+#define IPN3KE_FLOW_RESULT_ACL_OFFSET \
+    (IPN3KE_FLOW_RESULT_UNUSED_BITS)
+
+#define IPN3KE_FLOW_RESULT_UID_OFFSET \
+    (IPN3KE_FLOW_RESULT_ACL_OFFSET + IPN3KE_FLOW_RESULT_ACL_BITS)
+
+#define IPN3KE_FLOW_RESULT_UID_MAX \
+    ((1UL << IPN3KE_FLOW_RESULT_UID_BITS) - 1)
+
+#ifndef BITS_PER_BYTE
+#define BITS_PER_BYTE 8
+#endif
+#define BITS_TO_BYTES(bits) \
+    (((bits) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
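The 'Unused' padding above makes both layouts DWORD-aligned, which
ipn3ke_flow_hw_update() relies on when it writes the key as four 32-bit words
and the result as one. A hypothetical compile-time check, not part of the
patch and assuming the macros above are in scope, would make that explicit:

    #include <rte_common.h>

    static inline void
    ipn3ke_flow_layout_check(void)
    {
        /* 17 + 3 + 108 = 128 bits: four 32-bit key words */
        RTE_BUILD_BUG_ON(IPN3KE_FLOW_KEY_TOTAL_BITS != 128);
        /* 15 + 1 + 16 = 32 bits: one 32-bit result word */
        RTE_BUILD_BUG_ON(IPN3KE_FLOW_RESULT_TOTAL_BITS != 32);
    }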
+struct ipn3ke_flow_rule {
+    uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_TOTAL_BITS)];
+    uint8_t result[BITS_TO_BYTES(IPN3KE_FLOW_RESULT_TOTAL_BITS)];
+};
+
+struct rte_flow {
+    TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+
+    struct ipn3ke_flow_rule rule;
+};
+
+TAILQ_HEAD(ipn3ke_flow_list, rte_flow);
+
+/* byte-order helpers: reverse the byte order of 16-/32-bit words */
+static inline uint16_t ipn3ke_swap16(uint16_t x)
+{
+    return ((x & 0xff) << 8) | ((x >> 8) & 0xff);
+}
+
+static inline uint32_t ipn3ke_swap32(uint32_t x)
+{
+    uint32_t high, low;
+    uint32_t high1, low1;
+
+    high = (x >> 16) & 0xffff;
+    low = x & 0xffff;
+    high1 = ipn3ke_swap16(low);
+    high1 = high1 << 16;
+    low1 = ipn3ke_swap16(high);
+    low1 = low1 & 0xffff;
+
+    return high1 | low1;
+}
+
+extern const struct rte_flow_ops ipn3ke_flow_ops;
+
+int ipn3ke_flow_init(void *dev);
+
+#endif /* _IPN3KE_FLOW_H_ */
diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c
index 63098bf..cf3b214 100644
--- a/drivers/net/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -21,6 +21,7 @@
 #include

 #include "ipn3ke_rawdev_api.h"
+#include "ipn3ke_flow.h"
 #include "ipn3ke_logs.h"
 #include "ipn3ke_ethdev.h"

@@ -746,7 +747,7 @@
     case RTE_ETH_FILTER_GENERIC:
         if (filter_op != RTE_ETH_FILTER_GET)
             return -EINVAL;
-        *(const void **)arg = NULL;
+        *(const void **)arg = &ipn3ke_flow_ops;
         break;
     default:
         IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported",
diff --git a/drivers/net/ipn3ke/ipn3ke_tm.c b/drivers/net/ipn3ke/ipn3ke_tm.c
index 8baa2fb..4bcf3aa 100644
--- a/drivers/net/ipn3ke/ipn3ke_tm.c
+++ b/drivers/net/ipn3ke/ipn3ke_tm.c
@@ -23,6 +23,7 @@
 #include

 #include "ipn3ke_rawdev_api.h"
+#include "ipn3ke_flow.h"
 #include "ipn3ke_logs.h"
 #include "ipn3ke_ethdev.h"
diff --git a/drivers/net/ipn3ke/meson.build b/drivers/net/ipn3ke/meson.build
index 3a95efc..74b4d7c 100644
--- a/drivers/net/ipn3ke/meson.build
+++ b/drivers/net/ipn3ke/meson.build
@@ -12,5 +12,6 @@ allow_experimental_apis = true

 sources += files('ipn3ke_ethdev.c',
     'ipn3ke_representor.c',
-    'ipn3ke_tm.c')
+    'ipn3ke_tm.c',
+    'ipn3ke_flow.c')

 deps += ['bus_ifpga', 'sched']
-- 
1.8.3.1