From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id A9AE07DEB for ; Fri, 26 Sep 2014 07:57:55 +0200 (CEST) Received: from fmsmga003.fm.intel.com ([10.253.24.29]) by fmsmga102.fm.intel.com with ESMTP; 25 Sep 2014 23:03:58 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="4.97,862,1389772800"; d="scan'208";a="391863657" Received: from shvmail01.sh.intel.com ([10.239.29.42]) by FMSMGA003.fm.intel.com with ESMTP; 25 Sep 2014 22:57:55 -0700 Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com [10.239.29.89]) by shvmail01.sh.intel.com with ESMTP id s8Q63tES027988; Fri, 26 Sep 2014 14:03:55 +0800 Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1]) by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP id s8Q63rgv012958; Fri, 26 Sep 2014 14:03:55 +0800 Received: (from wujingji@localhost) by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id s8Q63rid012954; Fri, 26 Sep 2014 14:03:53 +0800 From: Jingjing Wu To: dev@dpdk.org Date: Fri, 26 Sep 2014 14:03:24 +0800 Message-Id: <1411711418-12881-7-git-send-email-jingjing.wu@intel.com> X-Mailer: git-send-email 1.7.4.1 In-Reply-To: <1411711418-12881-1-git-send-email-jingjing.wu@intel.com> References: <1411711418-12881-1-git-send-email-jingjing.wu@intel.com> Subject: [dpdk-dev] [PATCH v3 06/20] i40e: implement operations to add/delete flow director X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Fri, 26 Sep 2014 05:57:57 -0000 deal with two operations for flow director: - RTE_ETH_FILTER_OP_ADD - RTE_ETH_FILTER_OP_DELETE encode the flow input into a programming packet, send the packet to the filter programming queue, and check the status on the status report queue Signed-off-by: Jingjing Wu Acked-by: Chen Jing D(Mark) Acked-by: Helin Zhang --- lib/librte_pmd_i40e/i40e_ethdev.c | 29 ++ lib/librte_pmd_i40e/i40e_ethdev.h | 3 + lib/librte_pmd_i40e/i40e_fdir.c | 617 +++++++++++++++++++++++++++++++++++++- 3 files changed, 648 insertions(+), 1 deletion(-) diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c index a3f25e6..9791519 100644 --- a/lib/librte_pmd_i40e/i40e_ethdev.c +++ b/lib/librte_pmd_i40e/i40e_ethdev.c @@ -205,6 +205,9 @@ static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); +static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg); /* Default hash key buffer for RSS */ static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1]; @@ -256,6 +259,7 @@ static struct eth_dev_ops i40e_eth_dev_ops = { .reta_query = i40e_dev_rss_reta_query, .rss_hash_update = i40e_dev_rss_hash_update, .rss_hash_conf_get = i40e_dev_rss_hash_conf_get, + .filter_ctrl = i40e_dev_filter_ctrl, }; static struct eth_driver rte_i40e_pmd = { @@ -4221,3 +4225,28 @@ i40e_pf_config_mq_rx(struct i40e_pf *pf) return 0; } + +/* + * Dispatch a filter operation to the handler for the given filter type on the NIC.
+ */ +static int +i40e_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret = I40E_SUCCESS; + + if (dev == NULL) + return -EINVAL; + + switch (filter_type) { + case RTE_ETH_FILTER_FDIR: + ret = i40e_fdir_ctrl_func(pf, filter_op, arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported filter type %u.", filter_type); + ret = -EINVAL; + break; + } + return ret; +} diff --git a/lib/librte_pmd_i40e/i40e_ethdev.h b/lib/librte_pmd_i40e/i40e_ethdev.h index 2460635..af149df 100644 --- a/lib/librte_pmd_i40e/i40e_ethdev.h +++ b/lib/librte_pmd_i40e/i40e_ethdev.h @@ -341,6 +341,9 @@ enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf, unsigned int socket_id); int i40e_fdir_setup(struct i40e_pf *pf); void i40e_fdir_teardown(struct i40e_pf *pf); +int i40e_fdir_ctrl_func(struct i40e_pf *pf, + enum rte_filter_op filter_op, + void *arg); /* I40E_DEV_PRIVATE_TO */ #define I40E_DEV_PRIVATE_TO_PF(adapter) \ diff --git a/lib/librte_pmd_i40e/i40e_fdir.c b/lib/librte_pmd_i40e/i40e_fdir.c index a3e6bd7..82645df 100644 --- a/lib/librte_pmd_i40e/i40e_fdir.c +++ b/lib/librte_pmd_i40e/i40e_fdir.c @@ -44,6 +44,10 @@ #include #include #include +#include +#include +#include +#include #include "i40e_logs.h" #include "i40e/i40e_type.h" @@ -51,12 +55,43 @@ #include "i40e_rxtx.h" #define I40E_FDIR_MZ_NAME "FDIR_MEMZONE" +#ifndef IPV6_ADDR_LEN +#define IPV6_ADDR_LEN 16 +#endif + #define I40E_FDIR_PKT_LEN 512 +#define I40E_FDIR_IP_DEFAULT_LEN 420 +#define I40E_FDIR_IP_DEFAULT_TTL 0x40 +#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45 +#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50 +#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60300000 +#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF +#define I40E_FDIR_IPv6_PAYLOAD_LEN 380 +#define I40E_FDIR_UDP_DEFAULT_LEN 400 + +/* Wait count and interval for fdir filter programming */ +#define I40E_FDIR_WAIT_COUNT 10 +#define I40E_FDIR_WAIT_INTERVAL_US 1000 + +/* Wait count and interval for fdir filter flush */ +#define I40E_FDIR_FLUSH_RETRY 50 +#define I40E_FDIR_FLUSH_INTERVAL_MS 5 + #define I40E_COUNTER_PF 2 /* Statistic counter index for one pf */ #define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF) static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq); +static int i40e_fdir_construct_pkt(struct i40e_pf *pf, + struct rte_eth_fdir_input *fdir_input, + unsigned char *raw_pkt); +static int i40e_add_del_fdir_filter(struct i40e_pf *pf, + struct rte_eth_fdir_filter *filter, + bool add); +static int i40e_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + struct rte_eth_fdir_filter *filter, + bool add); static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq) @@ -285,4 +320,584 @@ i40e_fdir_teardown(struct i40e_pf *pf) i40e_vsi_release(vsi); pf->fdir.fdir_vsi = NULL; return; -} \ No newline at end of file +} + +static int +i40e_fdir_construct_pkt(struct i40e_pf *pf, + struct rte_eth_fdir_input *fdir_input, + unsigned char *raw_pkt) +{ + struct ether_hdr *ether; + unsigned char *payload; + struct ipv4_hdr *ip; + struct ipv6_hdr *ip6; + struct udp_hdr *udp; + struct tcp_hdr *tcp; + struct sctp_hdr *sctp; + uint8_t size = 0; + int i, set_idx = 2; /* set_idx = 2 means using l4 payload by default */ + + switch (fdir_input->flow_type) { + case RTE_ETH_FLOW_TYPE_UDPV4: + ether = (struct ether_hdr *)raw_pkt; + ip = (struct ipv4_hdr *)(raw_pkt + sizeof(struct ether_hdr)); + udp = (struct udp_hdr
*)(raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv4_hdr)); + payload = raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr); + + ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL; + /* set the default total length */ + ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN); + ip->time_to_live = I40E_FDIR_IP_DEFAULT_TTL; + + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + ip->src_addr = fdir_input->flow.udp4_flow.dst_ip; + ip->dst_addr = fdir_input->flow.udp4_flow.src_ip; + ip->next_proto_id = IPPROTO_UDP; + udp->src_port = fdir_input->flow.udp4_flow.dst_port; + udp->dst_port = fdir_input->flow.udp4_flow.src_port; + udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN); + break; + + case RTE_ETH_FLOW_TYPE_TCPV4: + ether = (struct ether_hdr *)raw_pkt; + ip = (struct ipv4_hdr *)(raw_pkt + sizeof(struct ether_hdr)); + tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv4_hdr)); + payload = raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv4_hdr) + sizeof(struct tcp_hdr); + + ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL; + ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN); + ip->time_to_live = I40E_FDIR_IP_DEFAULT_TTL; + + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + ip->src_addr = fdir_input->flow.tcp4_flow.dst_ip; + ip->dst_addr = fdir_input->flow.tcp4_flow.src_ip; + ip->next_proto_id = IPPROTO_TCP; + tcp->src_port = fdir_input->flow.tcp4_flow.dst_port; + tcp->dst_port = fdir_input->flow.tcp4_flow.src_port; + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + break; + + case RTE_ETH_FLOW_TYPE_SCTPV4: + ether = (struct ether_hdr *)raw_pkt; + ip = (struct ipv4_hdr *)(raw_pkt + sizeof(struct ether_hdr)); + sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv4_hdr)); + payload = raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv4_hdr) + sizeof(struct sctp_hdr); + + ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL; + ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN); + ip->time_to_live = I40E_FDIR_IP_DEFAULT_TTL; + + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + ip->src_addr = fdir_input->flow.sctp4_flow.dst_ip; + ip->dst_addr = fdir_input->flow.sctp4_flow.src_ip; + ip->next_proto_id = IPPROTO_SCTP; + sctp->tag = fdir_input->flow.sctp4_flow.verify_tag; + break; + + case RTE_ETH_FLOW_TYPE_IPV4_OTHER: + ether = (struct ether_hdr *)raw_pkt; + ip = (struct ipv4_hdr *)(raw_pkt + sizeof(struct ether_hdr)); + payload = raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv4_hdr); + + ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL; + ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN); + ip->time_to_live = I40E_FDIR_IP_DEFAULT_TTL; + ip->next_proto_id = IPPROTO_IP; + + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets.
+ */ + ip->src_addr = fdir_input->flow.ip4_flow.dst_ip; + ip->dst_addr = fdir_input->flow.ip4_flow.src_ip; + set_idx = 1; /* l3 payload */ + break; + + case RTE_ETH_FLOW_TYPE_UDPV6: + ether = (struct ether_hdr *)raw_pkt; + ip6 = (struct ipv6_hdr *)(raw_pkt + sizeof(struct ether_hdr)); + udp = (struct udp_hdr *)(raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv6_hdr)); + payload = raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv6_hdr) + sizeof(struct udp_hdr); + + ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6); + ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW); + ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + ip6->hop_limits = I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + rte_memcpy(&(ip6->src_addr), &(fdir_input->flow.udp6_flow.dst_ip), + IPV6_ADDR_LEN); + rte_memcpy(&(ip6->dst_addr), &(fdir_input->flow.udp6_flow.src_ip), + IPV6_ADDR_LEN); + ip6->proto = IPPROTO_UDP; + udp->src_port = fdir_input->flow.udp6_flow.dst_port; + udp->dst_port = fdir_input->flow.udp6_flow.src_port; + udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + break; + + case RTE_ETH_FLOW_TYPE_TCPV6: + ether = (struct ether_hdr *)raw_pkt; + ip6 = (struct ipv6_hdr *)(raw_pkt + sizeof(struct ether_hdr)); + tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv6_hdr)); + payload = raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv6_hdr) + sizeof(struct tcp_hdr); + + ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6); + ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW); + ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + ip6->hop_limits = I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + rte_memcpy(&(ip6->src_addr), &(fdir_input->flow.tcp6_flow.dst_ip), + IPV6_ADDR_LEN); + rte_memcpy(&(ip6->dst_addr), &(fdir_input->flow.tcp6_flow.src_ip), + IPV6_ADDR_LEN); + ip6->proto = IPPROTO_TCP; + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + tcp->src_port = fdir_input->flow.tcp6_flow.dst_port; + tcp->dst_port = fdir_input->flow.tcp6_flow.src_port; + break; + + case RTE_ETH_FLOW_TYPE_SCTPV6: + ether = (struct ether_hdr *)raw_pkt; + ip6 = (struct ipv6_hdr *)(raw_pkt + sizeof(struct ether_hdr)); + sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv6_hdr)); + payload = raw_pkt + sizeof(struct ether_hdr) + + sizeof(struct ipv6_hdr) + sizeof(struct sctp_hdr); + + ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6); + ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW); + ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + ip6->hop_limits = I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets.
+ */ + rte_memcpy(&(ip6->src_addr), &(fdir_input->flow.sctp6_flow.dst_ip), + IPV6_ADDR_LEN); + rte_memcpy(&(ip6->dst_addr), &(fdir_input->flow.sctp6_flow.src_ip), + IPV6_ADDR_LEN); + ip6->proto = IPPROTO_SCTP; + sctp->tag = fdir_input->flow.sctp6_flow.verify_tag; + break; + + case RTE_ETH_FLOW_TYPE_IPV6_OTHER: + ether = (struct ether_hdr *)raw_pkt; + ip6 = (struct ipv6_hdr *)(raw_pkt + sizeof(struct ether_hdr)); + payload = raw_pkt + sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr); + + ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6); + ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW); + ip6->proto = IPPROTO_NONE; + ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + ip6->hop_limits = I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + rte_memcpy(&(ip6->src_addr), &(fdir_input->flow.ip6_flow.dst_ip), + IPV6_ADDR_LEN); + rte_memcpy(&(ip6->dst_addr), &(fdir_input->flow.ip6_flow.src_ip), + IPV6_ADDR_LEN); + set_idx = 1; /* l3 payload */ + break; + default: + PMD_DRV_LOG(ERR, "unknown flow type %u\n", fdir_input->flow_type); + return -EINVAL; + } + + for (i = 0; i < 3; i++) { + if (pf->fdir.flex_set[set_idx][i].size == 0) + break; + (void)rte_memcpy(payload + 2 * pf->fdir.flex_set[set_idx][i].offset, + fdir_input->flow_ext.flexwords + size, + 2 * pf->fdir.flex_set[set_idx][i].size); + size = pf->fdir.flex_set[set_idx][i].size; + } + return 0; +} + +/* Construct the tx flags */ +static inline uint64_t +i40e_build_ctob(uint32_t td_cmd, + uint32_t td_offset, + unsigned int size, + uint32_t td_tag) +{ + return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +/* + * Check the programming status descriptor on the rx queue,
+ * which is done after a Flow Director programming descriptor has + * been submitted on the tx queue. + */ +static inline int +i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq) +{ + volatile union i40e_rx_desc *rxdp; + uint64_t qword1; + uint32_t rx_status; + uint32_t len, id; + uint32_t error; + int ret = 0; + + rxdp = &rxq->rx_ring[rxq->rx_tail]; + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) + >> I40E_RXD_QW1_STATUS_SHIFT; + + if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { + len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT; + id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; + + if (len == I40E_RX_PROG_STATUS_DESC_LENGTH && + id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) { + error = (qword1 & + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; + if (error == (0x1 << + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + PMD_DRV_LOG(ERR, "Failed to add FDIR filter" + " (FD_ID %u): programming status" + " reported.", + rxdp->wb.qword0.hi_dword.fd_id); + ret = -1; + } else if (error == (0x1 << + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { + PMD_DRV_LOG(ERR, "Failed to delete FDIR filter" + " (FD_ID %u): programming status" + " reported.", + rxdp->wb.qword0.hi_dword.fd_id); + ret = -1; + } else + PMD_DRV_LOG(ERR, "invalid programming status" + " reported, error = %u.", error); + } else + PMD_DRV_LOG(ERR, "unknown programming status" + " reported, len = %d, id = %u.", len, id); + rxdp->wb.qword1.status_error_len = 0; + rxq->rx_tail++; + if (unlikely(rxq->rx_tail == rxq->nb_rx_desc)) + rxq->rx_tail = 0; + } + return ret; +} + +/* + * Add or remove a flow director filter. + */ +static int +i40e_add_del_fdir_filter(struct i40e_pf *pf, + struct rte_eth_fdir_filter *filter, + bool add) +{ + int ret = I40E_SUCCESS; + unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; + + if (!(pf->flags & I40E_FLAG_FDIR)) { + PMD_DRV_LOG(ERR, "FDIR is not enabled."); + return -ENOTSUP; + } + + memset(pkt, 0, I40E_FDIR_PKT_LEN); + + ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt); + if (ret < 0) { + PMD_DRV_LOG(ERR, "construct packet for fdir fails."); + return ret; + } + + switch (filter->input.flow_type) { + case RTE_ETH_FLOW_TYPE_UDPV4: + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_NONF_IPV4_UDP, + filter, add); + if (ret < 0) + PMD_DRV_LOG(ERR, "NONF_IPV4_UDP fdir program fails."); + break; + case RTE_ETH_FLOW_TYPE_TCPV4: + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP, + filter, add); + if (ret < 0) + PMD_DRV_LOG(ERR, "NONF_IPV4_TCP fdir program fails."); + break; + case RTE_ETH_FLOW_TYPE_SCTPV4: + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, + filter, add); + if (ret < 0) { + PMD_DRV_LOG(ERR, "NONF_IPV4_SCTP fdir program fails."); + } + break; + case RTE_ETH_FLOW_TYPE_IPV4_OTHER: + /* program on both NONF_IPV4 and FRAG_IPV4 PCTYPE */ + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, + filter, add); + if (ret < 0) + PMD_DRV_LOG(ERR, "NONF_IPV4_OTHER fdir program fails."); + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_FRAG_IPV4, + filter, add); + if (ret < 0) + PMD_DRV_LOG(ERR, "NONF_FRAG_IPV4 fdir program fails."); + break; + case RTE_ETH_FLOW_TYPE_UDPV6: + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_NONF_IPV6_UDP, + filter, add); + if (ret < 0) + PMD_DRV_LOG(ERR, "NONF_IPV6_UDP fdir program fails."); + break; + case 
RTE_ETH_FLOW_TYPE_TCPV6: + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP, + filter, add); + if (ret < 0) + PMD_DRV_LOG(ERR, "NONF_IPV6_TCP fdir program fails."); + break; + case RTE_ETH_FLOW_TYPE_SCTPV6: + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, + filter, add); + if (ret < 0) + PMD_DRV_LOG(ERR, "NONF_IPV6_SCTP fdir program fails."); + break; + case RTE_ETH_FLOW_TYPE_IPV6_OTHER: + /* program on both NONF_IPV6 and FRAG_IPV6 PCTYPE */ + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, + filter, add); + if (ret < 0) + PMD_DRV_LOG(ERR, "NONF_IPV6_OTHER fdir program fails."); + ret = i40e_fdir_filter_programming(pf, + I40E_FILTER_PCTYPE_FRAG_IPV6, + filter, add); + if (ret < 0) + PMD_DRV_LOG(ERR, "NONF_FRAG_IPV6 fdir program fails."); + break; + default: + PMD_DRV_LOG(ERR, "invalid flow_type input."); + ret = -EINVAL; + } + return ret; +} + +/* + * Program a flow director filter rule. + * This is done by writing a Flow Director programming descriptor followed + * by the packet structure that contains the filter fields to match. + */ +static int +i40e_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + struct rte_eth_fdir_filter *filter, + bool add) +{ + struct i40e_tx_queue *txq = pf->fdir.txq; + struct i40e_rx_queue *rxq = pf->fdir.rxq; + struct rte_eth_fdir_input *fdir_input = &filter->input; + struct rte_eth_fdir_action *fdir_action = &filter->action; + volatile struct i40e_tx_desc *txdp; + volatile struct i40e_filter_program_desc *fdirdp; + uint32_t td_cmd; + uint16_t i; + uint8_t dest; + + PMD_DRV_LOG(INFO, "filling filter programming descriptor."); + fdirdp = (volatile struct i40e_filter_program_desc *) + (&(txq->tx_ring[txq->tx_tail])); + + fdirdp->qindex_flex_ptype_vsi = + rte_cpu_to_le_32((fdir_action->rx_queue << + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((fdir_action->flex_off << + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) & + I40E_TXD_FLTR_QW0_FLEXOFF_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((pctype << + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) & + I40E_TXD_FLTR_QW0_PCTYPE_MASK); + + /* Use LAN VSI Id if not programmed by user */ + if (fdir_input->flow_ext.dest_id == 0) + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((pf->main_vsi->vsi_id << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & + I40E_TXD_FLTR_QW0_DEST_VSI_MASK); + else + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((fdir_input->flow_ext.dest_id << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & + I40E_TXD_FLTR_QW0_DEST_VSI_MASK); + + fdirdp->dtype_cmd_cntindex = + rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG); + + if (add) + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + else + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + + if (fdir_action->drop) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + else + dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest << + I40E_TXD_FLTR_QW1_DEST_SHIFT) & + I40E_TXD_FLTR_QW1_DEST_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32((fdir_action->report_status << + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) & + I40E_TXD_FLTR_QW1_FD_STATUS_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); + if (fdir_action->cnt_idx != 0) + 
fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32((fdir_action->cnt_idx << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK); + else + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32((pf->fdir.match_counter_index << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK); + + fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id); + txq->tx_tail++; + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + + PMD_DRV_LOG(INFO, "filling transmit descriptor."); + txdp = &(txq->tx_ring[txq->tx_tail]); + txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr); + td_cmd = I40E_TX_DESC_CMD_EOP | + I40E_TX_DESC_CMD_RS | + I40E_TX_DESC_CMD_DUMMY; + + txdp->cmd_type_offset_bsz = + i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0); + + txq->tx_tail++; + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + /* Update the tx tail register */ + rte_wmb(); + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) { + rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US); + if (txdp->cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + break; + } + if (i >= I40E_FDIR_WAIT_COUNT) { + PMD_DRV_LOG(ERR, "Failed to program FDIR filter:" + " timed out waiting for DD on tx queue."); + return -ETIMEDOUT; + } + /* wait out the remainder of the 10 ms budget before checking the programming status */ + rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US); + if (i40e_check_fdir_programming_status(rxq) < 0) { + PMD_DRV_LOG(ERR, "Failed to program FDIR filter:" + " programming status reported."); + return -ENOSYS; + } + + return 0; +} + +/* + * i40e_fdir_ctrl_func - handle all operations on the flow director. + * @pf: board private structure + * @filter_op: operation to be performed + * @arg: a pointer to the structure specific to filter_op + */ +int +i40e_fdir_ctrl_func(struct i40e_pf *pf, enum rte_filter_op filter_op, void *arg) +{ + int ret = I40E_SUCCESS; + + if (arg == NULL && filter_op != RTE_ETH_FILTER_OP_NONE && + filter_op != RTE_ETH_FILTER_OP_FLUSH) + return -EINVAL; + + switch (filter_op) { + case RTE_ETH_FILTER_OP_NONE: + if (!(pf->flags & I40E_FLAG_FDIR)) + ret = -ENOTSUP; + break; + case RTE_ETH_FILTER_OP_ADD: + ret = i40e_add_del_fdir_filter(pf, + (struct rte_eth_fdir_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_OP_DELETE: + ret = i40e_add_del_fdir_filter(pf, + (struct rte_eth_fdir_filter *)arg, + FALSE); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} -- 1.8.1.4
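
For reference, a minimal usage sketch (not part of this patch) of how an application could exercise the new filter_ctrl entry point to add a flow director rule for an IPv4/UDP flow. It assumes the generic rte_eth_dev_filter_ctrl() ethdev API and the rte_eth_fdir_filter layout used in this patch series (flat src_ip/dst_ip/src_port/dst_port fields in input.flow.udp4_flow, action.rx_queue, soft_id); field names and nesting may differ in other DPDK releases, and the addresses, ports and ids below are placeholders.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

static int
add_udp4_fdir_rule(uint8_t port_id, uint16_t rx_queue)
{
	struct rte_eth_fdir_filter filter;

	memset(&filter, 0, sizeof(filter));
	/* identifier reported back with packets that hit this rule */
	filter.soft_id = 1;
	/* flow key: addresses and ports in network byte order, as on the wire */
	filter.input.flow_type = RTE_ETH_FLOW_TYPE_UDPV4;
	filter.input.flow.udp4_flow.src_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1));
	filter.input.flow.udp4_flow.dst_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 2));
	filter.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(1024);
	filter.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4096);
	/* action: deliver matched packets to rx_queue (drop = 1 would discard them) */
	filter.action.rx_queue = rx_queue;
	filter.action.drop = 0;

	/* dispatched to i40e_dev_filter_ctrl() and then i40e_fdir_ctrl_func() on i40e ports */
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_OP_ADD, &filter);
}

Removing the rule later would be the same call with RTE_ETH_FILTER_OP_DELETE and an identical filter description.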