From mboxrd@z Thu Jan  1 00:00:00 1970
From: Qiming Yang <qiming.yang@intel.com>
To: dev@dpdk.org
Cc: wei zhao
Date: Wed, 12 Jun 2019 15:50:27 +0800
Message-Id: <20190612075029.109914-2-qiming.yang@intel.com>
X-Mailer: git-send-email 2.9.5
In-Reply-To: <20190612075029.109914-1-qiming.yang@intel.com>
References: <1559552722-8970-1-git-send-email-qiming.yang@intel.com>
 <20190612075029.109914-1-qiming.yang@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter

From: wei zhao

This patch enables the backend of rte_flow. It translates a generic
rte_flow pattern and action list into the device-specific data
structures and configures the packet processing engine's binary
classifier (switch) accordingly.

Signed-off-by: Wei Zhao
---
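
A rule that this backend can program could be created through the
generic rte_flow API roughly as below. This is a minimal sketch for
illustration only, not part of the patch; port_id, the source address
and the queue index are placeholder values:

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = { .hdr.src_addr = UINT32_MAX };
	/* The ETH item carries no spec/mask here; only the fully masked
	 * IPv4 source address is translated into a lookup element.
	 */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &err);

The equivalent testpmd command would be:

	flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / end
		actions queue index 3 / end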

 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.h        |   6 +
 drivers/net/ice/ice_switch_filter.c | 502 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  28 ++
 drivers/net/ice/meson.build         |   3 +-
 5 files changed, 539 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..67a358a 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store the created flow. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..e679675
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,502 @@
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
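+/* Translate the rte_flow pattern items into a list of
+ * ice_adv_lkup_elem lookup words for the switch rule. Only fields
+ * whose mask is all ones are copied into a lookup element; the
+ * list is allocated here and owned by the caller.
+ */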
+static int
+ice_parse_switch_filter(
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_rule_info *rule_info,
+			struct ice_adv_lkup_elem **lkup_list,
+			uint16_t *lkups_num)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	struct ice_adv_lkup_elem *list;
+	uint16_t i, j, t = 0;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			item_num++;
+	}
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, ENOMEM,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"No memory for the lookup list");
+		goto out;
+	}
+	*lkup_list = list;
+
+	for (item = pattern, i = 0; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++, i++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[t].h_u.eth_hdr.
+							src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						list[t].m_u.eth_hdr.
+							src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[t].h_u.eth_hdr.
+							dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						list[t].m_u.eth_hdr.
+							dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					list[t].h_u.eth_hdr.ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					list[t].m_u.eth_hdr.ethtype_id =
+						UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						list[t].h_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						list[t].m_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						list[t].h_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						list[t].m_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					list[t].h_u.ice_ipv6_ofos_hdr.next_hdr =
+						ipv6_spec->hdr.proto;
+					list[t].m_u.ice_ipv6_ofos_hdr.next_hdr =
+						UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					list[t].h_u.ice_ipv6_ofos_hdr.
+						hop_limit =
+						ipv6_spec->hdr.hop_limits;
+					list[t].m_u.ice_ipv6_ofos_hdr.
+						hop_limit = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tun_type = ICE_SW_TUN_VXLAN;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT16_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tun_type = ICE_SW_TUN_NVGRE;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni =
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni =
+						UINT16_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	rule_info->tun_type = tun_type;
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now, the switch filter action code supports only the QUEUE
+ * and DROP actions.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				const struct rte_flow_action *actions,
+				struct rte_flow_error *error,
+				struct ice_adv_rule_info *rule_info)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue, index = 0;
+	uint32_t reg;
+
+	/* Check if the first non-void action is QUEUE or DROP. */
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+		act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Invalid queue register");
+		return -rte_errno;
+	}
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = act->conf;
+		if (act_q->index >= pf->dev_data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Invalid queue ID for switch filter.");
+			return -rte_errno;
+		}
+		rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
+		rule_info->sw_act.fwd_id.q_id = base_queue + act_q->index;
+	} else {
+		rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	/* Check if the next non-void action is END. */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
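+/* Program the rule into the switch component of the HW through the
+ * shared code and attach the returned rule bookkeeping data to the
+ * rte_flow so the rule can be removed later.
+ */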
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		PMD_DRV_LOG(ERR, "item number too large for rule");
+		return -ENOTSUP;
+	}
+	if (!list) {
+		PMD_DRV_LOG(ERR, "lookup list should not be NULL");
+		return -ENOTSUP;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
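+/* Create a switch filter: parse the pattern and the actions, then
+ * program the resulting rule. The lookup list is freed on both the
+ * success and the error path.
+ */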
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			&rule_info, &list, &lkups_num);
+	if (ret)
+		goto out;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto out;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow);
+	if (ret)
+		goto out;
+
+	rte_free(list);
+	return 0;
+
+out:
+	rte_free(list);
+
+	return ret;
+}
+
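+/* Remove a rule previously created by the switch filter backend and
+ * release the bookkeeping data attached to the rte_flow.
+ */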
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+	struct ice_rule_query_data rule_added;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		PMD_DRV_LOG(ERR,
+			"no such flow created by switch filter");
+		return -EINVAL;
+	}
+
+	rte_memcpy(&rule_added, filter_ptr,
+		sizeof(struct ice_rule_query_data));
+
+	ret = ice_rem_adv_rule_by_id(hw, &rule_added);
+
+	rte_free(filter_ptr);
+
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..957d0d1
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,28 @@
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+/* Advance 'act' to the next non-VOID action, starting at 'index'. */
+#define NEXT_ITEM_OF_ACTION(act, actions, index)			\
+	do {								\
+		act = actions + index;					\
+		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {	\
+			index++;					\
+			act = actions + index;				\
+		}							\
+	} while (0)
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 )
 
 deps += ['hash']
-- 
2.9.5