From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: from rcdn-iport-8.cisco.com (rcdn-iport-8.cisco.com [173.37.86.79])
 by dpdk.org (Postfix) with ESMTP id 698172B92
 for ; Fri, 31 Mar 2017 04:06:27 +0200 (CEST)
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=cisco.com;
 i=@cisco.com; l=32236; q=dns/txt; s=iport; t=1490925987; x=1492135587;
 h=from:to:cc:subject:date:message-id:in-reply-to:references;
 bh=mDvk/fa1EvkGuwo19SNSyePzY/yVu83nOtH9X6rLOjg=;
 b=Ntvkt/B6fsr89NX0DY3S0MW4MlMJml1oPXvc3RJQEQxM+rNndAfXP+/7
 VXCC8xejkMuiMBOOWh9geALhDME7owZ4InzZL2kpC+/zTMS5jI+jaLJkd
 KrL6fJZ4GFAO6fXhuu+v2BkxaDXRsgFtUhpBcSp7PxhVg1AI1GtqPiwm3
 g=;
X-IronPort-AV: E=Sophos;i="5.36,250,1486425600"; d="scan'208";a="225122784"
Received: from alln-core-12.cisco.com ([173.36.13.134])
 by rcdn-iport-8.cisco.com with ESMTP/TLS/DHE-RSA-AES256-SHA;
 31 Mar 2017 02:06:26 +0000
Received: from cisco.com (savbu-usnic-a.cisco.com [10.193.184.48])
 by alln-core-12.cisco.com (8.14.5/8.14.5) with ESMTP id v2V26Qiv002747;
 Fri, 31 Mar 2017 02:06:26 GMT
Received: by cisco.com (Postfix, from userid 392789)
 id 16D233FAAF17; Thu, 30 Mar 2017 19:06:26 -0700 (PDT)
From: John Daley
To: ferruh.yigit@intel.com, john.mcnamara@intel.com
Cc: dev@dpdk.org, John Daley
Date: Thu, 30 Mar 2017 19:06:18 -0700
Message-Id: <20170331020622.25498-4-johndale@cisco.com>
X-Mailer: git-send-email 2.12.0
In-Reply-To: <20170331020622.25498-1-johndale@cisco.com>
References: <20170330212838.31291-1-johndale@cisco.com> <20170331020622.25498-1-johndale@cisco.com>
Subject: [dpdk-dev] [PATCH v2 3/7] net/enic: flow API for NICs with advanced filters enabled
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
X-List-Received-Date: Fri, 31 Mar 2017 02:06:28 -0000

Flow support for 1300 series adapters with the 'Advanced Filter' mode
enabled via the UCS management interface. This enables:

Attributes: ingress
Items: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth (full hdr masking)
Actions: queue, mark, flag and void
Selectors: 'is', 'spec' and 'mask'. 'last' is not supported

Signed-off-by: John Daley
Reviewed-by: Nelson Escobar
---
 drivers/net/enic/enic.h      |  14 +-
 drivers/net/enic/enic_flow.c | 924 ++++++++++++++++++++++++++++++++++++++++++-
 drivers/net/enic/enic_main.c |   3 +
 drivers/net/enic/enic_res.c  |  15 +
 drivers/net/enic/enic_rxtx.c |  16 +-
 5 files changed, 953 insertions(+), 19 deletions(-)

diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index af1098b05..41dadb0ba 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -80,6 +80,8 @@
 #define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043	/* ethernet vnic */
 #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF	0x0071	/* enet SRIOV VF */
 
+/* Special Filter id for non-specific packet flagging. Don't change value */
+#define ENIC_MAGIC_FILTER_ID 0xffff
 
 #define ENICPMD_FDIR_MAX           64
 
@@ -111,6 +113,12 @@ struct enic_memzone_entry {
 	LIST_ENTRY(enic_memzone_entry) entries;
 };
 
+struct rte_flow {
+	LIST_ENTRY(rte_flow) next;
+	u16 enic_filter_id;
+	struct filter_v2 enic_filter;
+};
+
 /* Per-instance private data structure */
 struct enic {
 	struct enic *next;
@@ -135,7 +143,9 @@ struct enic {
 	int link_status;
 	u8 hw_ip_checksum;
 	u16 max_mtu;
-	u16 adv_filters;
+	u8 adv_filters;
+	u32 flow_filter_mode;
+	u8 filter_tags;
 
 	unsigned int flags;
 	unsigned int priv_flags;
@@ -170,6 +180,8 @@ struct enic {
 	rte_spinlock_t memzone_list_lock;
 	rte_spinlock_t mtu_lock;
 
+	LIST_HEAD(enic_flows, rte_flow) flows;
+	rte_spinlock_t flows_lock;
 };
 
 /* Get the CQ index from a Start of Packet(SOP) RQ index */
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index d25390f8a..e4c043665 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -55,6 +55,875 @@
 #define FLOW_LOG(level, fmt, args...) do { } while (0)
 #endif
 
+/** Info about how to copy items into enic filters. */
+struct enic_items {
+	/** Function for copying and validating an item. */
+	int (*copy_item)(const struct rte_flow_item *item,
+			 struct filter_v2 *enic_filter, u8 *inner_ofst);
+	/** List of valid previous items. */
+	const enum rte_flow_item_type * const prev_items;
+	/** True if it's OK for this item to be the first item. For some NIC
+	 * versions, it's invalid to start the stack above layer 3.
+	 */
+	const u8 valid_start_item;
+};
+
+/** Filtering capabilities for various NIC and firmware versions. */
+struct enic_filter_cap {
+	/** list of valid items and their handlers and attributes. */
+	const struct enic_items *item_info;
+};
+
+/* functions for copying flow actions into enic actions */
+typedef int (copy_action_fn)(const struct rte_flow_action actions[],
+			     struct filter_action_v2 *enic_action);
+
+/* functions for copying items into enic filters */
+typedef int (enic_copy_item_fn)(const struct rte_flow_item *item,
+				struct filter_v2 *enic_filter, u8 *inner_ofst);
+
+/** Action capabilities for various NICs. */
+struct enic_action_cap {
+	/** list of valid actions */
+	const enum rte_flow_action_type *actions;
+	/** copy function for a particular NIC */
+	int (*copy_fn)(const struct rte_flow_action actions[],
+		       struct filter_action_v2 *enic_action);
+};
+
+/* Forward declarations */
+static enic_copy_item_fn enic_copy_item_eth_v2;
+static enic_copy_item_fn enic_copy_item_vlan_v2;
+static enic_copy_item_fn enic_copy_item_ipv4_v2;
+static enic_copy_item_fn enic_copy_item_ipv6_v2;
+static enic_copy_item_fn enic_copy_item_udp_v2;
+static enic_copy_item_fn enic_copy_item_tcp_v2;
+static enic_copy_item_fn enic_copy_item_sctp_v2;
+static enic_copy_item_fn enic_copy_item_vxlan_v2;
+static copy_action_fn enic_copy_action_v2;
+
+/** NICs with Advanced filters enabled */
+static const struct enic_items enic_items_v3[] = {
+	[RTE_FLOW_ITEM_TYPE_ETH] = {
+		.copy_item = enic_copy_item_eth_v2,
+		.valid_start_item = 1,
+		.prev_items = (const enum rte_flow_item_type[]) {
+			       RTE_FLOW_ITEM_TYPE_VXLAN,
+			       RTE_FLOW_ITEM_TYPE_END,
+		},
+	},
+	[RTE_FLOW_ITEM_TYPE_VLAN] = {
+		.copy_item = enic_copy_item_vlan_v2,
+		.valid_start_item = 1,
+		.prev_items = (const enum rte_flow_item_type[]) {
+			       RTE_FLOW_ITEM_TYPE_ETH,
+			       RTE_FLOW_ITEM_TYPE_END,
+		},
+	},
+	[RTE_FLOW_ITEM_TYPE_IPV4] = {
+		.copy_item = enic_copy_item_ipv4_v2,
+		.valid_start_item = 1,
+		.prev_items = (const enum rte_flow_item_type[]) {
+			       RTE_FLOW_ITEM_TYPE_ETH,
+			       RTE_FLOW_ITEM_TYPE_VLAN,
+			       RTE_FLOW_ITEM_TYPE_END,
+		},
+	},
+	[RTE_FLOW_ITEM_TYPE_IPV6] = {
+		.copy_item = enic_copy_item_ipv6_v2,
+		.valid_start_item = 1,
+		.prev_items = (const enum rte_flow_item_type[]) {
+			       RTE_FLOW_ITEM_TYPE_ETH,
+			       RTE_FLOW_ITEM_TYPE_VLAN,
+			       RTE_FLOW_ITEM_TYPE_END,
+		},
+	},
+	[RTE_FLOW_ITEM_TYPE_UDP] = {
+		.copy_item = enic_copy_item_udp_v2,
+		.valid_start_item = 1,
+		.prev_items = (const enum rte_flow_item_type[]) {
+			       RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6,
+			       RTE_FLOW_ITEM_TYPE_END,
+		},
+	},
+	[RTE_FLOW_ITEM_TYPE_TCP] = {
+		.copy_item = enic_copy_item_tcp_v2,
+		.valid_start_item = 1,
+		.prev_items = (const enum rte_flow_item_type[]) {
+			       RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6,
+			       RTE_FLOW_ITEM_TYPE_END,
+		},
+	},
+	[RTE_FLOW_ITEM_TYPE_SCTP] = {
+		.copy_item = enic_copy_item_sctp_v2,
+		.valid_start_item = 1,
+		.prev_items = (const enum rte_flow_item_type[]) {
+			       RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6,
+			       RTE_FLOW_ITEM_TYPE_END,
+		},
+	},
+	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
+		.copy_item = enic_copy_item_vxlan_v2,
+		.valid_start_item = 1,
+		.prev_items = (const enum rte_flow_item_type[]) {
+			       RTE_FLOW_ITEM_TYPE_UDP,
+			       RTE_FLOW_ITEM_TYPE_END,
+		},
+	},
+};
+
+/** Filtering capabilities indexed by this NIC's supported filter type. */
+static const struct enic_filter_cap enic_filter_cap[] = {
+	[FILTER_DPDK_1] = {
+		.item_info = enic_items_v3,
+	},
+};
+
+/** Supported actions for newer NICs */
+static const enum rte_flow_action_type enic_supported_actions_v2[] = {
+	RTE_FLOW_ACTION_TYPE_QUEUE,
+	RTE_FLOW_ACTION_TYPE_MARK,
+	RTE_FLOW_ACTION_TYPE_FLAG,
+	RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Action capabilities indexed by NIC version information */
+static const struct enic_action_cap enic_action_cap[] = {
+	[FILTER_ACTION_V2_ALL] = {
+		.actions = enic_supported_actions_v2,
+		.copy_fn = enic_copy_action_v2,
+	},
+};
+/**
+ * Copy ETH item into version 2 NIC filter.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param enic_filter[out]
+ *   Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ *   If zero, this is an outer header. If non-zero, this is the offset into L5
+ *   where the header begins.
+ */
+static int
+enic_copy_item_eth_v2(const struct rte_flow_item *item,
+		      struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+	struct ether_hdr enic_spec;
+	struct ether_hdr enic_mask;
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+	FLOW_TRACE();
+
+	/* Match all if no spec */
+	if (!spec)
+		return 0;
+
+	if (!mask)
+		mask = &rte_flow_item_eth_mask;
+
+	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
+	       ETHER_ADDR_LEN);
+	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
+	       ETHER_ADDR_LEN);
+
+	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
+	       ETHER_ADDR_LEN);
+	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
+	       ETHER_ADDR_LEN);
+	enic_spec.ether_type = spec->type;
+	enic_mask.ether_type = mask->type;
+
+	if (!*inner_ofst) {
+		/* outer header */
+		memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
+		       sizeof(struct ether_hdr));
+		memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
+		       sizeof(struct ether_hdr));
+	} else {
+		/* inner header */
+		if ((*inner_ofst + sizeof(struct ether_hdr)) >
+		     FILTER_GENERIC_1_KEY_LEN)
+			return ENOTSUP;
+		/* Offset into L5 where inner Ethernet header goes */
+		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+		       &enic_mask, sizeof(struct ether_hdr));
+		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+		       &enic_spec, sizeof(struct ether_hdr));
+		*inner_ofst += sizeof(struct ether_hdr);
+	}
+	return 0;
+}
+
+/**
+ * Copy VLAN item into version 2 NIC filter.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param enic_filter[out]
+ *   Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ *   If zero, this is an outer header. If non-zero, this is the offset into L5
+ *   where the header begins.
+ */
+static int
+enic_copy_item_vlan_v2(const struct rte_flow_item *item,
+		       struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+	const struct rte_flow_item_vlan *spec = item->spec;
+	const struct rte_flow_item_vlan *mask = item->mask;
+	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+	FLOW_TRACE();
+
+	/* Match all if no spec */
+	if (!spec)
+		return 0;
+
+	/* Don't support filtering in tpid */
+	if (mask) {
+		if (mask->tpid != 0)
+			return ENOTSUP;
+	} else {
+		mask = &rte_flow_item_vlan_mask;
+		RTE_ASSERT(mask->tpid == 0);
+	}
+
+	if (!*inner_ofst) {
+		/* Outer header. Use the vlan mask/val fields */
+		gp->mask_vlan = mask->tci;
+		gp->val_vlan = spec->tci;
+	} else {
+		/* Inner header. Mask/Val start at *inner_ofst into L5 */
+		if ((*inner_ofst + sizeof(struct vlan_hdr)) >
+		     FILTER_GENERIC_1_KEY_LEN)
+			return ENOTSUP;
+		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+		       mask, sizeof(struct vlan_hdr));
+		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+		       spec, sizeof(struct vlan_hdr));
+		*inner_ofst += sizeof(struct vlan_hdr);
+	}
+	return 0;
+}
+
+/**
+ * Copy IPv4 item into version 2 NIC filter.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param enic_filter[out]
+ *   Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ *   Must be 0. Don't support inner IPv4 filtering.
+ */ +static int +enic_copy_item_ipv4_v2(const struct rte_flow_item *item, + struct filter_v2 *enic_filter, u8 *inner_ofst) +{ + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + FLOW_TRACE(); + + if (*inner_ofst) + return ENOTSUP; + + /* Match IPv4 */ + gp->mask_flags |= FILTER_GENERIC_1_IPV4; + gp->val_flags |= FILTER_GENERIC_1_IPV4; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_ipv4_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr, + sizeof(struct ipv4_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr, + sizeof(struct ipv4_hdr)); + return 0; +} + +/** + * Copy IPv6 item into version 2 NIC filter. + * + * @param item[in] + * Item specification. + * @param enic_filter[out] + * Partially filled in NIC filter structure. + * @param inner_ofst[in] + * Must be 0. Don't support inner IPv6 filtering. + */ +static int +enic_copy_item_ipv6_v2(const struct rte_flow_item *item, + struct filter_v2 *enic_filter, u8 *inner_ofst) +{ + const struct rte_flow_item_ipv6 *spec = item->spec; + const struct rte_flow_item_ipv6 *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + FLOW_TRACE(); + + if (*inner_ofst) + return ENOTSUP; + + /* Match IPv6 */ + gp->mask_flags |= FILTER_GENERIC_1_IPV6; + gp->val_flags |= FILTER_GENERIC_1_IPV6; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_ipv6_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr, + sizeof(struct ipv6_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr, + sizeof(struct ipv6_hdr)); + return 0; +} + +/** + * Copy UDP item into version 2 NIC filter. + * + * @param item[in] + * Item specification. + * @param enic_filter[out] + * Partially filled in NIC filter structure. + * @param inner_ofst[in] + * Must be 0. Don't support inner UDP filtering. + */ +static int +enic_copy_item_udp_v2(const struct rte_flow_item *item, + struct filter_v2 *enic_filter, u8 *inner_ofst) +{ + const struct rte_flow_item_udp *spec = item->spec; + const struct rte_flow_item_udp *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + FLOW_TRACE(); + + if (*inner_ofst) + return ENOTSUP; + + /* Match UDP */ + gp->mask_flags |= FILTER_GENERIC_1_UDP; + gp->val_flags |= FILTER_GENERIC_1_UDP; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_udp_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, + sizeof(struct udp_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, + sizeof(struct udp_hdr)); + return 0; +} + +/** + * Copy TCP item into version 2 NIC filter. + * + * @param item[in] + * Item specification. + * @param enic_filter[out] + * Partially filled in NIC filter structure. + * @param inner_ofst[in] + * Must be 0. Don't support inner TCP filtering. 
+ */ +static int +enic_copy_item_tcp_v2(const struct rte_flow_item *item, + struct filter_v2 *enic_filter, u8 *inner_ofst) +{ + const struct rte_flow_item_tcp *spec = item->spec; + const struct rte_flow_item_tcp *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + FLOW_TRACE(); + + if (*inner_ofst) + return ENOTSUP; + + /* Match TCP */ + gp->mask_flags |= FILTER_GENERIC_1_TCP; + gp->val_flags |= FILTER_GENERIC_1_TCP; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + return ENOTSUP; + + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, + sizeof(struct tcp_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, + sizeof(struct tcp_hdr)); + return 0; +} + +/** + * Copy SCTP item into version 2 NIC filter. + * + * @param item[in] + * Item specification. + * @param enic_filter[out] + * Partially filled in NIC filter structure. + * @param inner_ofst[in] + * Must be 0. Don't support inner SCTP filtering. + */ +static int +enic_copy_item_sctp_v2(const struct rte_flow_item *item, + struct filter_v2 *enic_filter, u8 *inner_ofst) +{ + const struct rte_flow_item_sctp *spec = item->spec; + const struct rte_flow_item_sctp *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + FLOW_TRACE(); + + if (*inner_ofst) + return ENOTSUP; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_sctp_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, + sizeof(struct sctp_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, + sizeof(struct sctp_hdr)); + return 0; +} + +/** + * Copy UDP item into version 2 NIC filter. + * + * @param item[in] + * Item specification. + * @param enic_filter[out] + * Partially filled in NIC filter structure. + * @param inner_ofst[in] + * Must be 0. VxLAN headers always start at the beginning of L5. + */ +static int +enic_copy_item_vxlan_v2(const struct rte_flow_item *item, + struct filter_v2 *enic_filter, u8 *inner_ofst) +{ + const struct rte_flow_item_vxlan *spec = item->spec; + const struct rte_flow_item_vxlan *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + FLOW_TRACE(); + + if (*inner_ofst) + return EINVAL; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_vxlan_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask, + sizeof(struct vxlan_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec, + sizeof(struct vxlan_hdr)); + + *inner_ofst = sizeof(struct vxlan_hdr); + return 0; +} + +/** + * Return 1 if current item is valid on top of the previous one. + * + * @param prev_item[in] + * The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this + * is the first item. + * @param item_info[in] + * Info about this item, like valid previous items. + * @param is_first[in] + * True if this the first item in the pattern. + */ +static int +item_stacking_valid(enum rte_flow_item_type prev_item, + const struct enic_items *item_info, u8 is_first_item) +{ + enum rte_flow_item_type const *allowed_items = item_info->prev_items; + + FLOW_TRACE(); + + for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) { + if (prev_item == *allowed_items) + return 1; + } + + /* This is the first item in the stack. Check if that's cool */ + if (is_first_item && item_info->valid_start_item) + return 1; + + return 0; +} + +/** + * Build the intenal enic filter structure from the provided pattern. The + * pattern is validated as the items are copied. 
+ *
+ * @param pattern[in]
+ * @param items_info[in]
+ *   Info about this NIC's item support, like valid previous items.
+ * @param enic_filter[out]
+ *   NIC specific filters derived from the pattern.
+ * @param error[out]
+ */
+static int
+enic_copy_filter(const struct rte_flow_item pattern[],
+		 const struct enic_items *items_info,
+		 struct filter_v2 *enic_filter,
+		 struct rte_flow_error *error)
+{
+	int ret;
+	const struct rte_flow_item *item = pattern;
+	u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
+	u8 is_first_item = 1;
+	enum rte_flow_item_type prev_item;
+	const struct enic_items *item_info;
+
+	enic_filter->type = FILTER_DPDK_1;
+
+	FLOW_TRACE();
+
+	prev_item = 0;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		/* Get info about how to validate and copy the item. If NULL
+		 * is returned the nic does not support the item.
+		 */
+		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+			continue;
+
+		item_info = &items_info[item->type];
+
+		/* check to see if item stacking is valid */
+		if (!item_stacking_valid(prev_item, item_info, is_first_item))
+			goto stacking_error;
+
+		ret = item_info->copy_item(item, enic_filter, &inner_ofst);
+		if (ret)
+			goto item_not_supported;
+		prev_item = item->type;
+		is_first_item = 0;
+	}
+	return 0;
+
+item_not_supported:
+	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
+			   NULL, "enic type error");
+	return -rte_errno;
+
+stacking_error:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   item, "stacking error");
+	return -rte_errno;
+}
+/**
+ * Build the internal version 2 NIC action structure from the provided
+ * list of actions. The actions are validated as they are copied.
+ *
+ * @param actions[in]
+ * @param enic_action[out]
+ *   NIC specific actions derived from the actions.
+ */
+static int
+enic_copy_action_v2(const struct rte_flow_action actions[],
+		    struct filter_action_v2 *enic_action)
+{
+	FLOW_TRACE();
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE: {
+			const struct rte_flow_action_queue *queue =
+				(const struct rte_flow_action_queue *)
+				actions->conf;
+			enic_action->rq_idx =
+				enic_rte_rq_idx_to_sop_idx(queue->index);
+			enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
+			break;
+		}
+		case RTE_FLOW_ACTION_TYPE_MARK: {
+			const struct rte_flow_action_mark *mark =
+				(const struct rte_flow_action_mark *)
+				actions->conf;
+
+			/* ENIC_MAGIC_FILTER_ID is reserved and is the highest
+			 * in the range of allowed mark ids.
+ */ + if (mark->id >= ENIC_MAGIC_FILTER_ID) + return EINVAL; + enic_action->filter_id = mark->id; + enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG; + break; + } + case RTE_FLOW_ACTION_TYPE_FLAG: { + enic_action->filter_id = ENIC_MAGIC_FILTER_ID; + enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG; + break; + } + case RTE_FLOW_ACTION_TYPE_VOID: + continue; + default: + RTE_ASSERT(0); + break; + } + } + enic_action->type = FILTER_ACTION_V2; + return 0; +} + +/** Check if the action is supported */ +static int +enic_match_action(const struct rte_flow_action *action, + const enum rte_flow_action_type *supported_actions) +{ + for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END; + supported_actions++) { + if (action->type == *supported_actions) + return 1; + } + return 0; +} + +/** Get the NIC filter capabilties structure */ +static const struct enic_filter_cap * +enic_get_filter_cap(struct enic *enic) +{ + /* FIXME: only support advanced filters for now */ + if (enic->flow_filter_mode != FILTER_DPDK_1) + return (const struct enic_filter_cap *)NULL; + + if (enic->flow_filter_mode) + return &enic_filter_cap[enic->flow_filter_mode]; + + return (const struct enic_filter_cap *)NULL; +} + +/** Get the actions for this NIC version. */ +static const struct enic_action_cap * +enic_get_action_cap(struct enic *enic) +{ + static const struct enic_action_cap *ea; + + if (enic->filter_tags) + ea = &enic_action_cap[FILTER_ACTION_V2_ALL]; + return ea; +} +/** + * Internal flow parse/validate function. + * + * @param dev[in] + * This device pointer. + * @param pattern[in] + * @param actions[in] + * @param error[out] + * @param enic_filter[out] + * Internal NIC filter structure pointer. + * @param enic_action[out] + * Internal NIC action structure pointer. + */ +static int +enic_flow_parse(struct rte_eth_dev *dev, + const struct rte_flow_attr *attrs, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct filter_v2 *enic_filter, + struct filter_action_v2 *enic_action) +{ + unsigned int ret = 0; + struct enic *enic = pmd_priv(dev); + const struct enic_filter_cap *enic_filter_cap; + const struct enic_action_cap *enic_action_cap; + const struct rte_flow_action *action; + + FLOW_TRACE(); + + memset(enic_filter, 0, sizeof(*enic_filter)); + memset(enic_action, 0, sizeof(*enic_action)); + + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "No pattern specified"); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "No action specified"); + return -rte_errno; + } + + if (attrs) { + if (attrs->group) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "priority groups are not supported"); + return -rte_errno; + } else if (attrs->priority) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, + "priorities are not supported"); + return -rte_errno; + } else if (attrs->egress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, + "egress is not supported"); + return -rte_errno; + } else if (!attrs->ingress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "only ingress is supported"); + return -rte_errno; + } + + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "No attribute specified"); + return -rte_errno; + } + + /* Verify Actions. 
+	enic_action_cap = enic_get_action_cap(enic);
+	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
+	     action++) {
+		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+			continue;
+		else if (!enic_match_action(action, enic_action_cap->actions))
+			break;
+	}
+	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   action, "Invalid action.");
+		return -rte_errno;
+	}
+	ret = enic_action_cap->copy_fn(actions, enic_action);
+	if (ret) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Unsupported action.");
+		return -rte_errno;
+	}
+
+	/* Verify Flow items. If copying the filter from flow format to enic
+	 * format fails, the flow is not supported
+	 */
+	enic_filter_cap = enic_get_filter_cap(enic);
+	if (enic_filter_cap == NULL) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Flow API not available");
+		return -rte_errno;
+	}
+	ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
+			       enic_filter, error);
+	return ret;
+}
+
+/**
+ * Push filter/action to the NIC.
+ *
+ * @param enic[in]
+ *   Device structure pointer.
+ * @param enic_filter[in]
+ *   Internal NIC filter structure pointer.
+ * @param enic_action[in]
+ *   Internal NIC action structure pointer.
+ * @param error[out]
+ */
+static struct rte_flow *
+enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
+		     struct filter_action_v2 *enic_action,
+		     struct rte_flow_error *error)
+{
+	struct rte_flow *flow;
+	int ret;
+	u16 entry;
+
+	FLOW_TRACE();
+
+	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate flow memory");
+		return NULL;
+	}
+
+	/* entry[in] is the queue id, entry[out] is the filter id for delete */
+	entry = enic_action->rq_idx;
+	ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
+				  enic_action);
+	if (!ret) {
+		flow->enic_filter_id = entry;
+		flow->enic_filter = *enic_filter;
+	} else {
+		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "vnic_dev_classifier error");
+		rte_free(flow);
+		return NULL;
+	}
+	return flow;
+}
+
+/**
+ * Remove filter/action from the NIC.
+ *
+ * @param enic[in]
+ *   Device structure pointer.
+ * @param filter_id[in]
+ *   Id of NIC filter.
+ * @param error[out]
+ */
+static int
+enic_flow_del_filter(struct enic *enic, u16 filter_id,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	FLOW_TRACE();
+
+	ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
+	if (ret)
+		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "vnic_dev_classifier failed");
+	return ret;
+}
+
 /*
  * The following functions are callbacks for Generic flow API.
  */
@@ -71,15 +940,15 @@ enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attrs;
-	(void)pattern;
-	(void)actions;
-	(void)error;
+	struct filter_v2 enic_filter;
+	struct filter_action_v2 enic_action;
+	int ret;
 
 	FLOW_TRACE();
 
-	return 0;
+	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
+			      &enic_filter, &enic_action);
+	return ret;
 }
 
 /**
@@ -95,15 +964,27 @@ enic_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attrs;
-	(void)pattern;
-	(void)actions;
-	(void)error;
+	int ret;
+	struct filter_v2 enic_filter;
+	struct filter_action_v2 enic_action;
+	struct rte_flow *flow;
+	struct enic *enic = pmd_priv(dev);
 
 	FLOW_TRACE();
 
-	return NULL;
+	ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
+			      &enic_action);
+	if (ret < 0)
+		return NULL;
+
+	rte_spinlock_lock(&enic->flows_lock);
+	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
+				    error);
+	if (flow)
+		LIST_INSERT_HEAD(&enic->flows, flow, next);
+	rte_spinlock_unlock(&enic->flows_lock);
+
+	return flow;
 }
 
 /**
@@ -116,11 +997,14 @@ static int
 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 		  __rte_unused struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)flow;
+	struct enic *enic = pmd_priv(dev);
 
 	FLOW_TRACE();
 
+	rte_spinlock_lock(&enic->flows_lock);
+	enic_flow_del_filter(enic, flow->enic_filter_id, error);
+	LIST_REMOVE(flow, next);
+	rte_spinlock_unlock(&enic->flows_lock);
 	return 0;
 }
 
@@ -133,11 +1017,19 @@ enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 static int
 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)error;
+	struct rte_flow *flow;
+	struct enic *enic = pmd_priv(dev);
 
 	FLOW_TRACE();
 
+	rte_spinlock_lock(&enic->flows_lock);
+
+	while (!LIST_EMPTY(&enic->flows)) {
+		flow = LIST_FIRST(&enic->flows);
+		enic_flow_del_filter(enic, flow->enic_filter_id, error);
+		LIST_REMOVE(flow, next);
+	}
+	rte_spinlock_unlock(&enic->flows_lock);
 	return 0;
 }
 
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 570b7b69a..4c515f601 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1316,6 +1316,9 @@ static int enic_dev_init(struct enic *enic)
 
 	vnic_dev_set_reset_flag(enic->vdev, 0);
 
+	LIST_INIT(&enic->flows);
+	rte_spinlock_init(&enic->flows_lock);
+
 	/* set up link status checking */
 	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
 
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 867bd25c4..e4b80d49c 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -104,6 +104,21 @@ int enic_get_vnic_config(struct enic *enic)
 	dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
 		 ? "" : "not "));
 
+	err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode,
+					   &enic->filter_tags);
+	if (err) {
+		dev_err(enic_get_dev(enic),
+			"Error getting filter modes, %d\n", err);
+		return err;
+	}
+
+	dev_info(enic, "Flow api filter mode: %s, Filter tagging %savailable\n",
+		((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" :
+		((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" :
+		((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" :
+		"NONE"))),
"" : "not ")); + c->wq_desc_count = min_t(u32, ENIC_MAX_WQ_DESCS, max_t(u32, ENIC_MIN_WQ_DESCS, diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c index 343dabc64..5f59e3726 100644 --- a/drivers/net/enic/enic_rxtx.c +++ b/drivers/net/enic/enic_rxtx.c @@ -253,8 +253,20 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) } mbuf->vlan_tci = vlan_tci; - /* RSS flag */ - if (enic_cq_rx_desc_rss_type(cqrd)) { + if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) { + struct cq_enet_rq_clsf_desc *clsf_cqd; + uint16_t filter_id; + clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd; + filter_id = clsf_cqd->filter_id; + if (filter_id) { + pkt_flags |= PKT_RX_FDIR; + if (filter_id != ENIC_MAGIC_FILTER_ID) { + mbuf->hash.fdir.hi = clsf_cqd->filter_id; + pkt_flags |= PKT_RX_FDIR_ID; + } + } + } else if (enic_cq_rx_desc_rss_type(cqrd)) { + /* RSS flag */ pkt_flags |= PKT_RX_RSS_HASH; mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); } -- 2.12.0