Date: Sat, 7 Sep 2019 19:01:04 +0800
From: Ye Xiaolong
To: Yahui Cao
Cc: Qiming Yang, Wenzhuo Lu, dev@dpdk.org, Qi Zhang, Beilei Xing
Message-ID: <20190907110104.GB108591@intel.com>
References: <20190906120058.108073-1-yahui.cao@intel.com> <20190906120058.108073-2-yahui.cao@intel.com>
In-Reply-To: <20190906120058.108073-2-yahui.cao@intel.com>
Subject: Re: [dpdk-dev] [dpdk-dev 01/12] net/ice: initialize and set up flow director

On 09/06, Yahui Cao wrote:
>From: Beilei Xing
>
>Enable flow director, including:
> - Create the control VSI
> - Allocate and set up the queue pair
> - Program the packet
>
>Signed-off-by: Beilei Xing
>---
> drivers/net/ice/Makefile          |   1 +
> drivers/net/ice/ice_ethdev.c      | 107 +++++--
> drivers/net/ice/ice_ethdev.h      |  19 ++
> drivers/net/ice/ice_fdir_filter.c | 139 +++++++++
> drivers/net/ice/ice_rxtx.c        | 448 ++++++++++++++++++++++++++++++
> drivers/net/ice/ice_rxtx.h        |   7 +
> drivers/net/ice/meson.build       |   3 +-

Documentation and release notes updates are needed here.
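For the release notes, I'd expect a new bullet under "New Features" in doc/guides/rel_notes/release_19_11.rst, e.g. something like (exact wording up to you):

  * **Updated the Intel ice driver.**

    * Added support for the flow director filter.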
> 7 files changed, 704 insertions(+), 20 deletions(-) > create mode 100644 drivers/net/ice/ice_fdir_filter.c > >diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile >index ae53c2646..cbbd03fcf 100644 >--- a/drivers/net/ice/Makefile >+++ b/drivers/net/ice/Makefile >@@ -62,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c > endif > > SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c >+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir_filter.c > ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2) > CC_AVX2_SUPPORT=1 > else >diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c >index 647aca3ed..cb32f08df 100644 >--- a/drivers/net/ice/ice_ethdev.c >+++ b/drivers/net/ice/ice_ethdev.c >@@ -1097,11 +1097,20 @@ ice_pf_sw_init(struct rte_eth_dev *dev) > hw->func_caps.common_cap.num_rxq); > > pf->lan_nb_qps = pf->lan_nb_qp_max; >+ if (hw->func_caps.fd_fltr_guar > 0 || >+ hw->func_caps.fd_fltr_best_effort > 0) { >+ pf->flags |= ICE_FLAG_FDIR; >+ pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR; >+ pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps; >+ } else { >+ pf->fdir_nb_qps = 0; >+ } >+ pf->fdir_qp_offset = 0; > > return 0; > } > >-static struct ice_vsi * >+struct ice_vsi * > ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) > { > struct ice_hw *hw = ICE_PF_TO_HW(pf); >@@ -1113,6 +1122,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) > struct rte_ether_addr mac_addr; > uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; > uint8_t tc_bitmap = 0x1; >+ uint16_t cfg; > > /* hw->num_lports = 1 in NIC mode */ > vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0); >@@ -1136,14 +1146,10 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) > pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE; > > memset(&vsi_ctx, 0, sizeof(vsi_ctx)); >- /* base_queue in used in queue mapping of VSI add/update command. >- * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ >- * cases in the first stage. Only Main VSI. 
>- */ >- vsi->base_queue = 0; > switch (type) { > case ICE_VSI_PF: > vsi->nb_qps = pf->lan_nb_qps; >+ vsi->base_queue = 1; > ice_vsi_config_default_rss(&vsi_ctx.info); > vsi_ctx.alloc_from_pool = true; > vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; >@@ -1157,6 +1163,18 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) > vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING; > vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF | > ICE_AQ_VSI_Q_OPT_RSS_TPLZ; >+ >+ /* FDIR */ >+ cfg = ICE_AQ_VSI_PROP_SECURITY_VALID | >+ ICE_AQ_VSI_PROP_FLOW_DIR_VALID; >+ vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); >+ cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE; >+ vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); >+ vsi_ctx.info.max_fd_fltr_dedicated = >+ rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar); >+ vsi_ctx.info.max_fd_fltr_shared = >+ rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort); >+ > /* Enable VLAN/UP trip */ > ret = ice_vsi_config_tc_queue_mapping(vsi, > &vsi_ctx.info, >@@ -1169,6 +1187,28 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) > goto fail_mem; > } > >+ break; >+ case ICE_VSI_CTRL: >+ vsi->nb_qps = pf->fdir_nb_qps; >+ vsi->base_queue = ICE_FDIR_QUEUE_ID; >+ vsi_ctx.alloc_from_pool = true; >+ vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; >+ >+ cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; >+ vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); >+ cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE; >+ vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); >+ vsi_ctx.info.sw_id = hw->port_info->sw_id; >+ ret = ice_vsi_config_tc_queue_mapping(vsi, >+ &vsi_ctx.info, >+ ICE_DEFAULT_TCMAP); >+ if (ret) { >+ PMD_INIT_LOG(ERR, >+ "tc queue mapping with vsi failed, " >+ "err = %d", >+ ret); >+ goto fail_mem; >+ } > break; > default: > /* for other types of VSI */ >@@ -1187,10 +1227,19 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) > } > vsi->msix_intr = ret; > vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID); >+ } else if (type == ICE_VSI_CTRL) { >+ ret = ice_res_pool_alloc(&pf->msix_pool, 1); >+ if (ret < 0) { >+ PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", >+ vsi->vsi_id, ret); >+ } >+ vsi->msix_intr = ret; >+ vsi->nb_msix = 1; > } else { > vsi->msix_intr = 0; > vsi->nb_msix = 0; > } >+ Unnecessary empty line. 
> ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL); > if (ret != ICE_SUCCESS) { > PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret); >@@ -1202,20 +1251,22 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) > pf->vsis_allocated = vsi_ctx.vsis_allocd; > pf->vsis_unallocated = vsi_ctx.vsis_unallocated; > >- /* MAC configuration */ >- rte_memcpy(pf->dev_addr.addr_bytes, >- hw->port_info->mac.perm_addr, >- ETH_ADDR_LEN); >+ if (type == ICE_VSI_PF) { >+ /* MAC configuration */ >+ rte_memcpy(pf->dev_addr.addr_bytes, >+ hw->port_info->mac.perm_addr, >+ ETH_ADDR_LEN); > >- rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN); >- ret = ice_add_mac_filter(vsi, &mac_addr); >- if (ret != ICE_SUCCESS) >- PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter"); >+ rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN); >+ ret = ice_add_mac_filter(vsi, &mac_addr); >+ if (ret != ICE_SUCCESS) >+ PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter"); > >- rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); >- ret = ice_add_mac_filter(vsi, &mac_addr); >- if (ret != ICE_SUCCESS) >- PMD_INIT_LOG(ERR, "Failed to add MAC filter"); >+ rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); >+ ret = ice_add_mac_filter(vsi, &mac_addr); >+ if (ret != ICE_SUCCESS) >+ PMD_INIT_LOG(ERR, "Failed to add MAC filter"); >+ } > > /* At the beginning, only TC0. */ > /* What we need here is the maximum number of the TX queues. >@@ -1253,7 +1304,9 @@ ice_send_driver_ver(struct ice_hw *hw) > static int > ice_pf_setup(struct ice_pf *pf) > { >+ struct ice_hw *hw = ICE_PF_TO_HW(pf); > struct ice_vsi *vsi; >+ uint16_t unused; > > /* Clear all stats counters */ > pf->offset_loaded = FALSE; >@@ -1262,6 +1315,13 @@ ice_pf_setup(struct ice_pf *pf) > memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats)); > memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats)); > >+ /* force guaranteed filter pool for PF */ >+ ice_alloc_fd_guar_item(hw, &unused, >+ hw->func_caps.fd_fltr_guar); >+ /* force shared filter pool for PF */ >+ ice_alloc_fd_shrd_item(hw, &unused, >+ hw->func_caps.fd_fltr_best_effort); >+ > vsi = ice_setup_vsi(pf, ICE_VSI_PF); > if (!vsi) { > PMD_INIT_LOG(ERR, "Failed to add vsi for PF"); >@@ -1698,7 +1758,7 @@ ice_dev_init(struct rte_eth_dev *dev) > return ret; > } > >-static int >+int > ice_release_vsi(struct ice_vsi *vsi) > { > struct ice_hw *hw; >@@ -1780,6 +1840,9 @@ ice_dev_stop(struct rte_eth_dev *dev) > /* disable all queue interrupts */ > ice_vsi_disable_queues_intr(main_vsi); > >+ if (pf->fdir.fdir_vsi) >+ ice_vsi_disable_queues_intr(pf->fdir.fdir_vsi); >+ > /* Clear all queues and release mbufs */ > ice_clear_queues(dev); > >@@ -2117,6 +2180,12 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev) > /* Enable interrupts for all the queues */ > ice_vsi_enable_queues_intr(vsi); > >+ /* Enable FDIR MSIX interrupt */ >+ if (pf->fdir.fdir_vsi) { >+ ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi); >+ ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi); >+ } >+ > rte_intr_enable(intr_handle); > > return 0; >diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h >index d1d07641d..c43242b63 100644 >--- a/drivers/net/ice/ice_ethdev.h >+++ b/drivers/net/ice/ice_ethdev.h >@@ -249,6 +249,17 @@ TAILQ_HEAD(ice_flow_list, rte_flow); > struct ice_flow_parser; > TAILQ_HEAD(ice_parser_list, ice_flow_parser); > >+/** >+ * A structure used to define the fields of FDIR related info.
>+ */ >+struct ice_fdir_info { >+ struct ice_vsi *fdir_vsi; /* pointer to fdir VSI structure */ >+ struct ice_tx_queue *txq; >+ struct ice_rx_queue *rxq; >+ void *prg_pkt; /* memory for fdir program packet */ >+ uint64_t dma_addr; /* physical address of packet memory */ >+}; >+ > struct ice_pf { > struct ice_adapter *adapter; /* The adapter this PF is associated with */ > struct ice_vsi *main_vsi; /* pointer to main VSI structure */ >@@ -268,6 +279,9 @@ struct ice_pf { > uint16_t lan_nb_qp_max; > uint16_t lan_nb_qps; /* The number of queue pairs of LAN */ > uint16_t base_queue; /* The base queue pairs index in the device */ >+ uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ >+ uint16_t fdir_qp_offset; >+ struct ice_fdir_info fdir; /* flow director info */ > struct ice_hw_port_stats stats_offset; > struct ice_hw_port_stats stats; > /* internal packet statistics, it should be excluded from the total */ >@@ -348,6 +362,11 @@ struct ice_vsi_vlan_pvid_info { > #define ICE_PF_TO_ETH_DEV(pf) \ > (((struct ice_pf *)pf)->adapter->eth_dev) > >+struct ice_vsi * >+ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type); >+int >+ice_release_vsi(struct ice_vsi *vsi); >+ > static inline int > ice_align_floor(int n) > { >diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c >new file mode 100644 >index 000000000..03d143058 >--- /dev/null >+++ b/drivers/net/ice/ice_fdir_filter.c >@@ -0,0 +1,139 @@ >+#include >+#include >+#include "base/ice_fdir.h" >+#include "base/ice_flow.h" >+#include "base/ice_type.h" >+#include "ice_ethdev.h" >+#include "ice_rxtx.h" >+#include "ice_generic_flow.h" >+ >+static const struct rte_memzone * >+ice_memzone_reserve(const char *name, uint32_t len, int socket_id) >+{ >+ const struct rte_memzone *mz; >+ >+ mz = rte_memzone_lookup(name); >+ if (mz) >+ return mz; >+ >+ mz = rte_memzone_reserve_aligned(name, len, socket_id, >+ RTE_MEMZONE_IOVA_CONTIG, >+ ICE_RING_BASE_ALIGN); >+ return mz;

I think we can directly return rte_memzone_reserve_aligned(xxx) here.
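Something like below (untested sketch, same calls as your version, just without the temporary):

	static const struct rte_memzone *
	ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
	{
		const struct rte_memzone *mz;

		mz = rte_memzone_lookup(name);
		if (mz)
			return mz;

		return rte_memzone_reserve_aligned(name, len, socket_id,
						   RTE_MEMZONE_IOVA_CONTIG,
						   ICE_RING_BASE_ALIGN);
	}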
>+} >+ >+#define ICE_FDIR_MZ_NAME "FDIR_MEMZONE" >+ >+/* >+ * ice_fdir_setup - reserve and initialize the Flow Director resources >+ * @pf: board private structure >+ */ >+static int >+ice_fdir_setup(struct ice_pf *pf) >+{ >+ struct rte_eth_dev *eth_dev = pf->adapter->eth_dev; >+ struct ice_hw *hw = ICE_PF_TO_HW(pf); >+ const struct rte_memzone *mz = NULL; >+ char z_name[RTE_MEMZONE_NAMESIZE]; >+ struct ice_vsi *vsi; >+ int err = ICE_SUCCESS; >+ >+ if ((pf->flags & ICE_FLAG_FDIR) == 0) { >+ PMD_INIT_LOG(ERR, "HW doesn't support FDIR"); >+ return -ENOTSUP; >+ } >+ >+ PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u," >+ " fd_fltr_best_effort = %u.", >+ hw->func_caps.fd_fltr_guar, >+ hw->func_caps.fd_fltr_best_effort); >+ >+ if (pf->fdir.fdir_vsi) { >+ PMD_DRV_LOG(INFO, "FDIR initialization has been done."); >+ return ICE_SUCCESS; >+ } >+ >+ /* make new FDIR VSI */ >+ vsi = ice_setup_vsi(pf, ICE_VSI_CTRL); >+ if (!vsi) { >+ PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI."); >+ return -EINVAL; >+ } >+ pf->fdir.fdir_vsi = vsi; >+ >+ /* FDIR Tx queue setup */ >+ err = ice_fdir_setup_tx_resources(pf); >+ if (err) { >+ PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources."); >+ goto fail_setup_tx; >+ } >+ >+ /* FDIR Rx queue setup */ >+ err = ice_fdir_setup_rx_resources(pf); >+ if (err) { >+ PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources."); >+ goto fail_setup_rx; >+ } >+ >+ err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id); >+ if (err) { >+ PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue."); >+ goto fail_mem; >+ } >+ >+ err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id); >+ if (err) { >+ PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue."); >+ goto fail_mem; >+ } >+ >+ /* reserve memory for the fdir programming packet */ >+ snprintf(z_name, sizeof(z_name), "ICE_%s_%d", >+ ICE_FDIR_MZ_NAME, >+ eth_dev->data->port_id); >+ mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY); >+ if (!mz) { >+ PMD_DRV_LOG(ERR, "Cannot init memzone for " >+ "flow director program packet."); >+ err = -ENOMEM; >+ goto fail_mem; >+ } >+ pf->fdir.prg_pkt = mz->addr; >+ pf->fdir.dma_addr = mz->iova; >+ >+ PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.", >+ vsi->base_queue); >+ return ICE_SUCCESS; >+ >+fail_mem: >+ ice_rx_queue_release(pf->fdir.rxq); >+ pf->fdir.rxq = NULL; >+fail_setup_rx: >+ ice_tx_queue_release(pf->fdir.txq); >+ pf->fdir.txq = NULL; >+fail_setup_tx: >+ ice_release_vsi(vsi); >+ pf->fdir.fdir_vsi = NULL; >+ return err; >+} >+ >+static int >+ice_init_fdir_filter(struct ice_adapter *ad) >+{ >+ struct ice_pf *pf = &ad->pf; >+ int ret; >+ >+ ret = ice_fdir_setup(pf); >+ >+ return ret;

This can simply be "return ice_fdir_setup(pf);", no need for the local ret.

>+} >+ >+static struct ice_flow_engine ice_fdir_engine = { >+ .init = ice_init_fdir_filter, >+ .type = ICE_FLOW_ENGINE_FDIR, >+}; >+ >+RTE_INIT(ice_fdir_init_log) >+{ >+ ice_register_flow_engine(&ice_fdir_engine); >+} >diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c >index 0282b5375..bd802e350 100644 >--- a/drivers/net/ice/ice_rxtx.c >+++ b/drivers/net/ice/ice_rxtx.c >@@ -474,6 +474,175 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) > return 0; > } > >+static enum ice_status >+ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq) >+{ >+ struct ice_vsi *vsi = rxq->vsi; >+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi); >+ struct ice_rlan_ctx rx_ctx; >+ enum ice_status err; >+ uint32_t regval; >+ >+ /** >+ * The kernel driver uses flex descriptor. It sets the register >+ * to flex descriptor mode.
>+ * DPDK uses legacy descriptor. It should set the register back >+ * to the default value, then use legacy descriptor mode. >+ */

I remember Haiyue has a patchset to enable the flex descriptor in this release, do we have any conflicts here?

>+ regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) & >+ QRXFLXP_CNTXT_RXDID_PRIO_M; >+ ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); >+ >+ rxq->rx_hdr_len = 0; >+ rxq->rx_buf_len = 1024; >+ >+ memset(&rx_ctx, 0, sizeof(rx_ctx)); >+ >+ rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; >+ rx_ctx.qlen = rxq->nb_rx_desc; >+ rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; >+ rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; >+ rx_ctx.dtype = 0; /* No Header Split mode */ >+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC >+ rx_ctx.dsize = 1; /* 32B descriptors */ >+#endif >+ rx_ctx.rxmax = RTE_ETHER_MAX_LEN; >+ /* TPH: Transaction Layer Packet (TLP) processing hints */ >+ rx_ctx.tphrdesc_ena = 1; >+ rx_ctx.tphwdesc_ena = 1; >+ rx_ctx.tphdata_ena = 1; >+ rx_ctx.tphhead_ena = 1; >+ /* Low Receive Queue Threshold defined in units of 64 descriptors. >+ * When the number of free descriptors goes below the lrxqthresh, >+ * an immediate interrupt is triggered. >+ */ >+ rx_ctx.lrxqthresh = 2; >+ /* default to 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */ >+ rx_ctx.l2tsel = 1; >+ rx_ctx.showiv = 0; >+ rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; >+ >+ err = ice_clear_rxq_ctx(hw, rxq->reg_idx); >+ if (err) { >+ PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", >+ rxq->queue_id); >+ return -EINVAL; >+ } >+ err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); >+ if (err) { >+ PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", >+ rxq->queue_id); >+ return -EINVAL; >+ } >+ >+ rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); >+ >+ /* Init the Rx tail register */ >+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); >+ >+ return 0; >+} >+ >+int >+ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) >+{ >+ struct ice_rx_queue *rxq; >+ int err; >+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); >+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); >+ >+ PMD_INIT_FUNC_TRACE(); >+ >+ rxq = pf->fdir.rxq; >+ if (!rxq || !rxq->q_set) { >+ PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or not set up", >+ rx_queue_id); >+ return -EINVAL; >+ } >+ >+ err = ice_fdir_program_hw_rx_queue(rxq); >+ if (err) { >+ PMD_DRV_LOG(ERR, "Failed to program FDIR RX queue %u", >+ rx_queue_id); >+ return -EIO; >+ } >+ >+ rte_wmb(); >+ >+ /* Init the RX tail register.
*/ >+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); >+ >+ err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE); >+ if (err) { >+ PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on", >+ rx_queue_id); >+ >+ ice_reset_rx_queue(rxq); >+ return -EINVAL; >+ } >+ >+ return 0; >+} >+ >+int >+ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) >+{ >+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); >+ struct ice_tx_queue *txq; >+ int err; >+ struct ice_vsi *vsi; >+ struct ice_hw *hw; >+ struct ice_aqc_add_tx_qgrp txq_elem; >+ struct ice_tlan_ctx tx_ctx; >+ >+ PMD_INIT_FUNC_TRACE(); >+ >+ txq = pf->fdir.txq; >+ if (!txq || !txq->q_set) { >+ PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or not set up", >+ tx_queue_id); >+ return -EINVAL; >+ } >+ >+ vsi = txq->vsi; >+ hw = ICE_VSI_TO_HW(vsi); >+ >+ memset(&txq_elem, 0, sizeof(txq_elem)); >+ memset(&tx_ctx, 0, sizeof(tx_ctx)); >+ txq_elem.num_txqs = 1; >+ txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); >+ >+ tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; >+ tx_ctx.qlen = txq->nb_tx_desc; >+ tx_ctx.pf_num = hw->pf_id; >+ tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; >+ tx_ctx.src_vsi = vsi->vsi_id; >+ tx_ctx.port_num = hw->port_info->lport; >+ tx_ctx.tso_ena = 1; /* tso enable */ >+ tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ >+ tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ >+ >+ ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx, >+ ice_tlan_ctx_info); >+ >+ txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); >+ >+ /* Init the Tx tail register */ >+ ICE_PCI_REG_WRITE(txq->qtx_tail, 0); >+ >+ /* FIXME: we assume TC is always 0 here */ >+ err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, >+ &txq_elem, sizeof(txq_elem), NULL); >+ if (err) { >+ PMD_DRV_LOG(ERR, "Failed to add FDIR txq"); >+ return -EIO; >+ } >+ /* store the schedule node id */ >+ txq->q_teid = txq_elem.txqs[0].q_teid; >+ >+ return 0; >+} >+

ice_fdir_program_hw_rx_queue / ice_fdir_setup_rx_resources / ice_fdir_setup_tx_resources look similar to ice_program_hw_rx_queue / ice_setup_rx_resources / ice_setup_tx_resources, any chance to reuse the existing code to avoid the duplication?

Thanks,
Xiaolong

> /* Free all mbufs for descriptors in tx queue */ > static void > _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq) >@@ -997,6 +1166,10 @@ ice_rxd_status_to_pkt_flags(uint64_t qword) > ICE_RX_DESC_FLTSTAT_RSS_HASH) == > ICE_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0; > >+ /* Check if FDIR Match */ >+ flags |= (qword & (1 << ICE_RX_DESC_STATUS_FLM_S) ?
>+ PKT_RX_FDIR : 0); >+ > return flags; > } > >@@ -1060,6 +1233,33 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp) > mb->vlan_tci, mb->vlan_tci_outer); > } > >+#define ICE_RX_DESC_EXT_STATUS_FLEXBH_M 0x03 >+#define ICE_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 >+ >+static inline uint64_t >+ice_rxd_build_fdir(volatile union ice_rx_desc *rxdp, struct rte_mbuf *mb) >+{ >+ uint64_t flags = 0; >+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC >+ uint16_t flexbh; >+ >+ flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >> >+ ICE_RX_DESC_EXT_STATUS_FLEXBH_S) & >+ ICE_RX_DESC_EXT_STATUS_FLEXBH_M; >+ >+ if (flexbh == ICE_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) { >+ mb->hash.fdir.hi = >+ rte_le_to_cpu_32(rxdp->wb.qword3.fd_id); >+ flags |= PKT_RX_FDIR_ID; >+ } >+#else >+ mb->hash.fdir.hi = >+ rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id); >+ flags |= PKT_RX_FDIR_ID; >+#endif >+ return flags; >+} >+ > #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC > #define ICE_LOOK_AHEAD 8 > #if (ICE_LOOK_AHEAD != 8) >@@ -1127,6 +1327,8 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) > mb->hash.rss = > rte_le_to_cpu_32( > rxdp[j].wb.qword0.hi_dword.rss); >+ if (pkt_flags & PKT_RX_FDIR) >+ pkt_flags |= ice_rxd_build_fdir(&rxdp[j], mb); > mb->packet_type = ptype_tbl[(uint8_t)( > (qword1 & > ICE_RXD_QW1_PTYPE_M) >> >@@ -1448,6 +1650,8 @@ ice_recv_scattered_pkts(void *rx_queue, > if (pkt_flags & PKT_RX_RSS_HASH) > first_seg->hash.rss = > rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); >+ if (pkt_flags & PKT_RX_FDIR) >+ pkt_flags |= ice_rxd_build_fdir(&rxd, first_seg); > > first_seg->ol_flags |= pkt_flags; > /* Prefetch data of first segment, if configured to do so. */ >@@ -1635,6 +1839,127 @@ ice_free_queues(struct rte_eth_dev *dev) > dev->data->nb_tx_queues = 0; > } > >+#define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC >+#define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC >+ >+int >+ice_fdir_setup_tx_resources(struct ice_pf *pf) >+{ >+ struct ice_tx_queue *txq; >+ const struct rte_memzone *tz = NULL; >+ uint32_t ring_size; >+ struct rte_eth_dev *dev; >+ >+ if (!pf) { >+ PMD_DRV_LOG(ERR, "PF is not available"); >+ return -EINVAL; >+ } >+ >+ dev = pf->adapter->eth_dev; >+ >+ /* Allocate the TX queue data structure. */ >+ txq = rte_zmalloc_socket("ice fdir tx queue", >+ sizeof(struct ice_tx_queue), >+ RTE_CACHE_LINE_SIZE, >+ SOCKET_ID_ANY); >+ if (!txq) { >+ PMD_DRV_LOG(ERR, "Failed to allocate memory for " >+ "tx queue structure."); >+ return -ENOMEM; >+ } >+ >+ /* Allocate TX hardware ring descriptors. */ >+ ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC; >+ ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); >+ >+ tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring", >+ ICE_FDIR_QUEUE_ID, ring_size, >+ ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); >+ if (!tz) { >+ ice_tx_queue_release(txq); >+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX."); >+ return -ENOMEM; >+ } >+ >+ txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC; >+ txq->queue_id = ICE_FDIR_QUEUE_ID; >+ txq->reg_idx = pf->fdir.fdir_vsi->base_queue; >+ txq->vsi = pf->fdir.fdir_vsi; >+ >+ txq->tx_ring_dma = tz->iova; >+ txq->tx_ring = (struct ice_tx_desc *)tz->addr; >+ /* >+ * don't need to allocate software ring and reset for the fdir >+ * program queue, just mark the queue as configured.
>+ */ >+ txq->q_set = TRUE; >+ pf->fdir.txq = txq; >+ >+ txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs; >+ >+ return ICE_SUCCESS; >+} >+ >+int >+ice_fdir_setup_rx_resources(struct ice_pf *pf) >+{ >+ struct ice_rx_queue *rxq; >+ const struct rte_memzone *rz = NULL; >+ uint32_t ring_size; >+ struct rte_eth_dev *dev; >+ >+ if (!pf) { >+ PMD_DRV_LOG(ERR, "PF is not available"); >+ return -EINVAL; >+ } >+ >+ dev = pf->adapter->eth_dev; >+ >+ /* Allocate the RX queue data structure. */ >+ rxq = rte_zmalloc_socket("ice fdir rx queue", >+ sizeof(struct ice_rx_queue), >+ RTE_CACHE_LINE_SIZE, >+ SOCKET_ID_ANY); >+ if (!rxq) { >+ PMD_DRV_LOG(ERR, "Failed to allocate memory for " >+ "rx queue structure."); >+ return -ENOMEM; >+ } >+ >+ /* Allocate RX hardware ring descriptors. */ >+ ring_size = sizeof(union ice_rx_desc) * ICE_FDIR_NUM_RX_DESC; >+ ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); >+ >+ rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring", >+ ICE_FDIR_QUEUE_ID, ring_size, >+ ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); >+ if (!rz) { >+ ice_rx_queue_release(rxq); >+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX."); >+ return -ENOMEM; >+ } >+ >+ rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC; >+ rxq->queue_id = ICE_FDIR_QUEUE_ID; >+ rxq->reg_idx = pf->fdir.fdir_vsi->base_queue; >+ rxq->vsi = pf->fdir.fdir_vsi; >+ >+ rxq->rx_ring_dma = rz->iova; >+ memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * sizeof(union ice_rx_desc)); >+ rxq->rx_ring = (union ice_rx_desc *)rz->addr; >+ >+ /* >+ * Don't need to allocate software ring and reset for the fdir >+ * rx queue, just mark the queue as configured. >+ */ >+ rxq->q_set = TRUE; >+ pf->fdir.rxq = rxq; >+ >+ rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; >+ >+ return ICE_SUCCESS; >+} >+ > uint16_t > ice_recv_pkts(void *rx_queue, > struct rte_mbuf **rx_pkts, >@@ -1716,6 +2041,8 @@ ice_recv_pkts(void *rx_queue, > if (pkt_flags & PKT_RX_RSS_HASH) > rxm->hash.rss = > rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); >+ if (pkt_flags & PKT_RX_FDIR) >+ pkt_flags |= ice_rxd_build_fdir(&rxd, rxm); > rxm->ol_flags |= pkt_flags; > /* copy old mbuf to rx_pkts */ > rx_pkts[nb_rx++] = rxm; >@@ -3061,3 +3388,124 @@ ice_set_default_ptype_table(struct rte_eth_dev *dev) > for (i = 0; i < ICE_MAX_PKT_TYPE; i++) > ad->ptype_tbl[i] = ice_get_default_pkt_type(i); > } >+ >+/* >+ * Check the programming status descriptor in rx queue,
>+ * done after the Flow Director programming descriptor is sent on the >+ * tx queue. >+ */ >+static inline int >+ice_check_fdir_programming_status(struct ice_rx_queue *rxq) >+{ >+ volatile union ice_rx_desc *rxdp; >+ uint64_t qword1; >+ uint32_t rx_status; >+ uint32_t len, id; >+ uint32_t error; >+ int ret = 0; >+ >+ rxdp = &rxq->rx_ring[rxq->rx_tail]; >+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); >+ rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >+ >> ICE_RXD_QW1_STATUS_S; >+ >+ if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) { >+ len = qword1 >> ICE_RX_PROG_STATUS_DESC_LEN_S; >+ id = (qword1 & ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M) >> >+ ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S; >+ >+ if (len == ICE_RX_PROG_STATUS_DESC_LEN && >+ id == ICE_RX_PROG_STATUS_DESC_FD_FLTR_STATUS) { >+ error = (qword1 & >+ ICE_RX_PROG_STATUS_DESC_QW1_ERROR_M) >> >+ ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S; >+ if (error == (0x1 << >+ ICE_RX_PROG_STATUS_DESC_FD_TBL_FULL_S)) { >+ PMD_DRV_LOG(ERR, "Failed to add FDIR filter" >+ " (FD_ID %u): programming status" >+ " reported.", >+ rxdp->wb.qword0.hi_dword.fd_id); >+ ret = -1; >+ } else if (error == (0x1 << >+ ICE_RX_PROG_STATUS_DESC_NO_FD_ENTRY_S)) { >+ PMD_DRV_LOG(ERR, "Failed to delete FDIR filter" >+ " (FD_ID %u): programming status" >+ " reported.", >+ rxdp->wb.qword0.hi_dword.fd_id); >+ ret = -1; >+ } else { >+ PMD_DRV_LOG(ERR, "invalid programming status" >+ " reported, error = %u.", error); >+ } >+ } else { >+ PMD_DRV_LOG(INFO, "unknown programming status" >+ " reported, len = %d, id = %u.", len, id); >+ } >+ rxdp->wb.qword1.status_error_len = 0; >+ rxq->rx_tail++; >+ if (unlikely(rxq->rx_tail == rxq->nb_rx_desc)) >+ rxq->rx_tail = 0; >+ if (rxq->rx_tail == 0) >+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); >+ else >+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1); >+ } >+ >+ return ret; >+} >+ >+#define ICE_FDIR_MAX_WAIT_US 10000 >+ >+int >+ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc) >+{ >+ struct ice_tx_queue *txq = pf->fdir.txq; >+ struct ice_rx_queue *rxq = pf->fdir.rxq; >+ volatile struct ice_fltr_desc *fdirdp; >+ volatile struct ice_tx_desc *txdp; >+ uint32_t td_cmd; >+ uint16_t i; >+ >+ fdirdp = (volatile struct ice_fltr_desc *) >+ (&txq->tx_ring[txq->tx_tail]); >+ fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat; >+ fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid; >+ >+ txdp = &txq->tx_ring[txq->tx_tail + 1]; >+ txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr); >+ td_cmd = ICE_TX_DESC_CMD_EOP | >+ ICE_TX_DESC_CMD_RS | >+ ICE_TX_DESC_CMD_DUMMY; >+ >+ txdp->cmd_type_offset_bsz = >+ ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0); >+ >+ txq->tx_tail += 2; >+ if (txq->tx_tail >= txq->nb_tx_desc) >+ txq->tx_tail = 0; >+ /* Update the tx tail register */ >+ rte_wmb(); >+ ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); >+ for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) { >+ if ((txdp->cmd_type_offset_bsz & >+ rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) == >+ rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) >+ break; >+ rte_delay_us(1); >+ } >+ if (i >= ICE_FDIR_MAX_WAIT_US) { >+ PMD_DRV_LOG(ERR, >+ "Failed to program FDIR filter: timed out waiting for DD on the tx queue."); >+ return -ETIMEDOUT; >+ } >+ >+ for (; i < ICE_FDIR_MAX_WAIT_US; i++) { >+ if (ice_check_fdir_programming_status(rxq) >= 0) >+ return 0; >+ rte_delay_us(1); >+ } >+ >+ PMD_DRV_LOG(ERR, >+ "Failed to program FDIR filter: programming status reported."); >+ return -ETIMEDOUT; >+} >diff --git a/drivers/net/ice/ice_rxtx.h
b/drivers/net/ice/ice_rxtx.h >index e9214110c..450db0244 100644 >--- a/drivers/net/ice/ice_rxtx.h >+++ b/drivers/net/ice/ice_rxtx.h >@@ -36,6 +36,8 @@ > #define ICE_TX_MAX_FREE_BUF_SZ 64 > #define ICE_DESCS_PER_LOOP 4 > >+#define ICE_FDIR_PKT_LEN 512 >+ > typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq); > typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq); > >@@ -147,10 +149,14 @@ int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); > int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); > int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); > int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); >+int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); >+int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); > void ice_rx_queue_release(void *rxq); > void ice_tx_queue_release(void *txq); > void ice_clear_queues(struct rte_eth_dev *dev); > void ice_free_queues(struct rte_eth_dev *dev); >+int ice_fdir_setup_tx_resources(struct ice_pf *pf); >+int ice_fdir_setup_rx_resources(struct ice_pf *pf); > uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, > uint16_t nb_pkts); > uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, >@@ -188,4 +194,5 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue, > uint16_t nb_pkts); > uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, > uint16_t nb_pkts); >+int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc); > #endif /* _ICE_RXTX_H_ */ >diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build >index 36b4b3c85..53846442a 100644 >--- a/drivers/net/ice/meson.build >+++ b/drivers/net/ice/meson.build >@@ -10,7 +10,8 @@ sources = files( > 'ice_ethdev.c', > 'ice_rxtx.c', > 'ice_switch_filter.c', >- 'ice_generic_flow.c' >+ 'ice_generic_flow.c', >+ 'ice_fdir_filter.c' > ) > > deps += ['hash'] >-- >2.17.1 >