DPDK patches and discussions
From: Haiyue Wang <haiyue.wang@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, Haiyue Wang <haiyue.wang@intel.com>,
	Alvin Zhang <alvinx.zhang@intel.com>,
	Qiming Yang <qiming.yang@intel.com>, Jeff Guo <jia.guo@intel.com>
Subject: [dpdk-dev] [PATCH v2] net/ice: fix DCF Rx segmentation fault
Date: Thu, 29 Oct 2020 09:13:22 +0800	[thread overview]
Message-ID: <20201029011322.39392-1-haiyue.wang@intel.com> (raw)
In-Reply-To: <20201028165545.20665-1-haiyue.wang@intel.com>

The initialization that selects the handler used by the scalar Rx path to
extract FlexiMD fields into the mbuf is missing, so the handler pointer is
left unset and the Rx path causes a segmentation fault (core dumped).

Also add the missing support for handling RXDID 16 (ICE_RXDID_COMMS_GENERIC),
which carries the RSS hash value in Qword 1.
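
For illustration only (not part of the patch), the following is a minimal,
self-contained sketch of the failure mode: the per-queue rxd_to_pkt_fields
function pointer must be selected when the queue is configured, and if that
step is skipped the first scalar Rx burst calls a NULL pointer. The struct
and helper names below are simplified stand-ins, not the driver's real types.

  /* Illustrative stand-alone C program, not driver code. */
  #include <stdio.h>

  struct rx_queue {
          /* stand-in for rxq->rxd_to_pkt_fields in ice_rxtx.h */
          void (*rxd_to_pkt_fields)(struct rx_queue *rxq);
  };

  static void handler_comms_generic(struct rx_queue *rxq)
  {
          (void)rxq;
          printf("extract FlexiMD fields into the mbuf\n");
  }

  /* analogous to ice_select_rxd_to_pkt_fields_handler() */
  static void select_handler(struct rx_queue *rxq)
  {
          rxq->rxd_to_pkt_fields = handler_comms_generic;
  }

  int main(void)
  {
          struct rx_queue rxq = { 0 };

          /* Without this call the pointer stays NULL and the call below
           * crashes -- the situation fixed here by invoking
           * ice_select_rxd_to_pkt_fields_handler() from
           * ice_dcf_configure_queues().
           */
          select_handler(&rxq);

          rxq.rxd_to_pkt_fields(&rxq);    /* scalar Rx burst step */
          return 0;
  }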

Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")

Reported-by: Alvin Zhang <alvinx.zhang@intel.com>
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
v2: fix the wrong RSS hash handling.
---
 drivers/net/ice/ice_dcf.c  |  1 +
 drivers/net/ice/ice_rxtx.c | 28 +++++++++++++++++++++++++++-
 drivers/net/ice/ice_rxtx.h |  2 ++
 3 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index d20e2b3f48..44dbd3bb84 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -899,6 +899,7 @@ ice_dcf_configure_queues(struct ice_dcf_hw *hw)
 			return -EINVAL;
 		}
 #endif
+		ice_select_rxd_to_pkt_fields_handler(rxq[i], vc_qp->rxq.rxdid);
 	}
 
 	memset(&args, 0, sizeof(args));
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index f6291894cd..1bac643125 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -43,6 +43,28 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
 				rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
 }
 
+static inline void
+ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
+				       struct rte_mbuf *mb,
+				       volatile union ice_rx_flex_desc *rxdp)
+{
+	volatile struct ice_32b_rx_flex_desc_comms *desc =
+			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
+
+	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+		mb->ol_flags |= PKT_RX_RSS_HASH;
+		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+	}
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	if (desc->flow_id != 0xFFFFFFFF) {
+		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+	}
+#endif
+}
+
 static inline void
 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
 				   struct rte_mbuf *mb,
@@ -148,7 +170,7 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
 #endif
 }
 
-static void
+void
 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
 {
 	switch (rxdid) {
@@ -182,6 +204,10 @@ ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
 
+	case ICE_RXDID_COMMS_GENERIC:
+		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
+		break;
+
 	case ICE_RXDID_COMMS_OVS:
 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
 		break;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 23409d479a..6b16716063 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -234,6 +234,8 @@ int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
 void ice_set_default_ptype_table(struct rte_eth_dev *dev);
 const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+void ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq,
+					  uint32_t rxdid);
 
 int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
-- 
2.29.0


Thread overview: 4+ messages
2020-10-28 16:55 [dpdk-dev] [PATCH v1] " Haiyue Wang
2020-10-29  1:13 ` Haiyue Wang [this message]
2020-10-29  5:35   ` [dpdk-dev] [PATCH v2] " Zhang, Qi Z
2020-10-29  1:19 ` [dpdk-dev] [PATCH v1] " Zhang, Qi Z
