From: Ye Xiaolong <xiaolong.ye@intel.com>
To: Yahui Cao <yahui.cao@intel.com>
Cc: Qiming Yang <qiming.yang@intel.com>,
	Wenzhuo Lu <wenzhuo.lu@intel.com>,
	dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>,
	Beilei Xing <beilei.xing@intel.com>
Subject: Re: [dpdk-dev] [PATCH] net/ice: fix FDIR programming status check issue
Date: Thu, 14 Nov 2019 15:23:05 +0800	[thread overview]
Message-ID: <20191114072305.GI66623@intel.com> (raw)
In-Reply-To: <20191113160921.90573-1-yahui.cao@intel.com>

On 11/14, Yahui Cao wrote:
>To detect whether FDIR programming succeeded or failed, enable the
>legacy programming status descriptor write-back (WB) format and enable
>the FDIR queue interrupt.
>
>Fixes: 84dc7a95a2d3 ("net/ice: enable flow director engine")
>Cc: beilei.xing@intel.com
>
>Signed-off-by: Yahui Cao <yahui.cao@intel.com>
>Signed-off-by: Beilei Xing <beilei.xing@intel.com>
>---
> drivers/net/ice/ice_ethdev.c |  1 +
> drivers/net/ice/ice_rxtx.c   | 93 +++++++++++++++++++++++++++++++++---
> 2 files changed, 88 insertions(+), 6 deletions(-)
>
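
In outline, FDIR rules on ice are programmed by posting a filter
descriptor to a dedicated Tx queue, and the hardware confirms the
outcome by writing a programming status descriptor back to the paired
Rx queue; before this patch the Rx side was never checked. A simplified
sketch of the flow the patch completes (the wrapper name here is
hypothetical, only ice_fdir_programming() is real):

	/* Illustrative wrapper; the numbered steps happen inside
	 * ice_fdir_programming() after this patch. */
	static int
	fdir_program_flow_sketch(struct ice_pf *pf, struct ice_fltr_desc *desc)
	{
		/* 1. Post *desc on pf->fdir.txq and bump the Tx tail.
		 * 2. Poll the Tx descriptor until hardware completes it.
		 * 3. Poll pf->fdir.rxq for the legacy programming status
		 *    write-back and map it to 0 / -EINVAL / -ETIMEDOUT. */
		return ice_fdir_programming(pf, desc);
	}
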
>diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
>index 3b20ea423..27b0fbc83 100644
>--- a/drivers/net/ice/ice_ethdev.c
>+++ b/drivers/net/ice/ice_ethdev.c
>@@ -2726,6 +2726,7 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
> 
> 	/* Enable FDIR MSIX interrupt */
> 	if (pf->fdir.fdir_vsi) {
>+		pf->fdir.fdir_vsi->nb_used_qps = 1;
> 		ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
> 		ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
> 	}
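
For context on the hunk above: the interrupt bind/enable helpers iterate
over vsi->nb_used_qps, and the FDIR VSI never goes through the normal
queue setup path that populates it, so without this assignment those
loops would do nothing. A paraphrased sketch of the loop shape this
relies on (QINT_RQCTL and QINT_RQCTL_CAUSE_ENA_M exist in the ice base
code; treat the helper itself as an illustration, not the exact
upstream code):

	/* Illustrative only: why nb_used_qps must be nonzero. */
	static void
	fdir_vsi_bind_intr_sketch(struct ice_vsi *vsi)
	{
		struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
		uint16_t i;

		/* With nb_used_qps == 0 this loop never runs, the FDIR
		 * queue interrupt cause stays disabled, and the status
		 * write-back is never delivered. */
		for (i = 0; i < vsi->nb_used_qps; i++)
			ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i),
				      QINT_RQCTL_CAUSE_ENA_M);
	}
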
>diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
>index 18c02979e..2db174456 100644
>--- a/drivers/net/ice/ice_rxtx.c
>+++ b/drivers/net/ice/ice_rxtx.c
>@@ -535,7 +535,7 @@ ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
> {
> 	struct ice_vsi *vsi = rxq->vsi;
> 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
>-	uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
>+	uint32_t rxdid = ICE_RXDID_LEGACY_1;
> 	struct ice_rlan_ctx rx_ctx;
> 	enum ice_status err;
> 	uint32_t regval;
>@@ -550,9 +550,7 @@ ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
> 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
> 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
> 	rx_ctx.dtype = 0; /* No Header Split mode */
>-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> 	rx_ctx.dsize = 1; /* 32B descriptors */
>-#endif
> 	rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
> 	/* TPH: Transaction Layer Packet (TLP) processing hints */
> 	rx_ctx.tphrdesc_ena = 1;
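
The switch to ICE_RXDID_LEGACY_1 above is the heart of the fix: the
programming status is written back only in the legacy 32-byte descriptor
format, whose qword1 carries the DD, PROGID and FAIL bits decoded later
in this patch; that is also why dsize is now set unconditionally and why
the Rx ring below is sized with union ice_32byte_rx_desc. A tiny
standalone decode example, using hypothetical local macros that mirror
the ICE_RX_PROG_STATUS_DESC_WB_QW1_* definitions added further down:

	#include <stdint.h>
	#include <stdio.h>

	#define PROGID_S 1                      /* bits 2:1, 0 = add, 1 = delete */
	#define PROGID_M (0x3ULL << PROGID_S)
	#define FAIL_S   4                      /* bit 4, rule programming failed */
	#define FAIL_M   (1ULL << FAIL_S)

	int main(void)
	{
		uint64_t qword1 = 0x3;  /* DD set, PROGID = 1 (delete), no failure */

		printf("progid=%u fail=%u\n",
		       (unsigned)((qword1 & PROGID_M) >> PROGID_S),
		       (unsigned)((qword1 & FAIL_M) >> FAIL_S));
		return 0;       /* prints: progid=1 fail=0 */
	}
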
>@@ -2077,7 +2075,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
> 	}
> 
> 	/* Allocate RX hardware ring descriptors. */
>-	ring_size = sizeof(union ice_rx_flex_desc) * ICE_FDIR_NUM_RX_DESC;
>+	ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
> 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
> 
> 	rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
>@@ -2096,7 +2094,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
> 
> 	rxq->rx_ring_dma = rz->iova;
> 	memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
>-	       sizeof(union ice_rx_flex_desc));
>+	       sizeof(union ice_32byte_rx_desc));
> 	rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
> 
> 	/*
>@@ -3607,12 +3605,81 @@ ice_set_default_ptype_table(struct rte_eth_dev *dev)
> 		ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
> }
> 
>+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S	1
>+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M	\
>+			(0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
>+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
>+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
>+
>+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S	4
>+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M	\
>+	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
>+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S	5
>+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M	\
>+	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
>+
>+/*
>+ * Check the programming status descriptor in the Rx queue.
>+ * This is done after a Flow Director programming request has
>+ * been submitted on the Tx queue.
>+ */
>+static inline int
>+ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
>+{
>+	volatile union ice_32byte_rx_desc *rxdp;
>+	uint64_t qword1;
>+	uint32_t rx_status;
>+	uint32_t error;
>+	uint32_t id;
>+	int ret = -EAGAIN;
>+
>+	rxdp = (volatile union ice_32byte_rx_desc *)
>+		(&rxq->rx_ring[rxq->rx_tail]);
>+	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
>+	rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
>+			>> ICE_RXD_QW1_STATUS_S;
>+
>+	if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
>+		ret = 0;
>+		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
>+			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
>+		id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
>+			ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
>+		if (error) {
>+			if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
>+				PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
>+			else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
>+				PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
>+			ret = -EINVAL;
>+			goto err;
>+		}
>+		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
>+			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
>+		if (error) {
>+			PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
>+			ret = -EINVAL;
>+		}
>+err:
>+		rxdp->wb.qword1.status_error_len = 0;
>+		rxq->rx_tail++;
>+		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
>+			rxq->rx_tail = 0;
>+		if (rxq->rx_tail == 0)
>+			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
>+		else
>+			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
>+	}
>+
>+	return ret;
>+}
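
The tail handling just above keeps the hardware doorbell one descriptor
behind the software tail, the usual trick so the device never sees the
ring as completely full. A self-contained model of that arithmetic, as a
hypothetical standalone helper rather than patch code:

	#include <stdint.h>

	/* Model of the tail bump in ice_check_fdir_programming_status(). */
	static uint16_t
	hw_tail_after_consume(uint16_t rx_tail, uint16_t nb_rx_desc)
	{
		rx_tail++;
		if (rx_tail == nb_rx_desc)
			rx_tail = 0;
		/* Doorbell trails the software tail by one entry. */
		return (rx_tail == 0) ? (uint16_t)(nb_rx_desc - 1)
				      : (uint16_t)(rx_tail - 1);
	}
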
>+
> #define ICE_FDIR_MAX_WAIT_US 10000
> 
> int
> ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
> {
> 	struct ice_tx_queue *txq = pf->fdir.txq;
>+	struct ice_rx_queue *rxq = pf->fdir.rxq;
> 	volatile struct ice_fltr_desc *fdirdp;
> 	volatile struct ice_tx_desc *txdp;
> 	uint32_t td_cmd;
>@@ -3650,5 +3717,19 @@ ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
> 		return -ETIMEDOUT;
> 	}
> 
>-	return 0;
>+	for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
>+		int ret;
>+
>+		ret = ice_check_fdir_programming_status(rxq);
>+		if (ret == -EAGAIN)
>+			rte_delay_us(1);
>+		else
>+			return ret;
>+	}
>+
>+	PMD_DRV_LOG(ERR,
>+		    "Failed to program FDIR filter: programming status not reported.");
>+	return -ETIMEDOUT;
> }
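
With this change ice_fdir_programming() has a three-way return contract:
0 once success is confirmed, -EINVAL when the hardware reports a rule or
profile failure, and -ETIMEDOUT when no status descriptor arrives within
ICE_FDIR_MAX_WAIT_US microseconds. A hypothetical caller fragment (pf,
fdir_desc and ret are assumed to be declared in the surrounding scope):

	ret = ice_fdir_programming(pf, &fdir_desc);
	if (ret == -EINVAL)
		PMD_DRV_LOG(ERR, "hardware rejected the FDIR rule");
	else if (ret == -ETIMEDOUT)
		PMD_DRV_LOG(ERR, "no programming status within %d us",
			    ICE_FDIR_MAX_WAIT_US);
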
>-- 
>2.17.1
>

Acked-by: Xiaolong Ye <xiaolong.ye@intel.com>

Applied to dpdk-next-net-intel, Thanks.

Thread overview: 8+ messages
2019-11-13 16:09 Yahui Cao
2019-11-14  7:23 ` Ye Xiaolong [this message]
2019-11-15  9:06   ` Ferruh Yigit
2019-11-18 22:23 ` [dpdk-dev] [PATCH v2 0/2] fix testpmd quit error after FDIR rule created Yahui Cao
2019-11-18 22:23   ` [dpdk-dev] [PATCH v2 1/2] net/ice: fix FDIR programming status check issue Yahui Cao
2019-11-18 22:23   ` [dpdk-dev] [PATCH v2 2/2] net/ice: fix FDIR rule failure after device stop Yahui Cao
2019-11-19  1:14   ` [dpdk-dev] [PATCH v2 0/2] fix testpmd quit error after FDIR rule created Zhang, Qi Z
2019-11-19  5:11   ` Ye Xiaolong
