From: Qi Zhang <qi.z.zhang@intel.com>
To: qiming.yang@intel.com
Cc: liheng.zhang@intel.com, yixue.wang@intel.com, yao.dong@intel.com,
	dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>,
	stable@dpdk.org
Subject: [dpdk-dev] [PATCH] net/ice: fix data path corruption in secondary process
Date: Wed, 26 May 2021 13:59:23 +0800	[thread overview]
Message-ID: <20210526055923.1024336-1-qi.z.zhang@intel.com> (raw)

The rte_eth_devices array is not in shared memory, so it must not be
referenced through ice_adapter, which is shared between the primary
and secondary processes. Any process that sets ice_adapter->eth_dev
corrupts the other process's context; this happens when a secondary
process's vector path tries to fetch the offload configuration
through the chain rxq->vsi->adapter->eth_dev->data->dev_conf.

This patch removes the "eth_dev" field from ice_adapter. Data paths
that need the rte_eth_dev_data instance now access it through
adapter->pf.dev_data instead of adapter->eth_dev->data.

Fixes: f9cf4f864150 ("net/ice: support device initialization")
Cc: stable@dpdk.org

Reported-by: Yixue Wang <yixue.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
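(Reviewer note, kept below the "---" so it stays out of the commit
message.)

The failure mode is easiest to see in a minimal C sketch. The names
shared_adapter, local_devices and dev_data below are hypothetical
stand-ins for ice_adapter, rte_eth_devices and rte_eth_dev_data; only
the memory placement matters, and this block is illustrative, not part
of the patch:

	#include <stdint.h>

	struct dev_data { uint16_t nb_rx_queues; }; /* in shared memory */
	struct eth_dev  { struct dev_data *data; }; /* per-process      */

	/* Each process has its own copy at its own address. */
	static struct eth_dev local_devices[32];

	struct shared_adapter {                     /* in shared memory */
		/*
		 * BAD: a per-process pointer cached in shared memory.
		 * The last process to write it wins; the other process
		 * then dereferences an address that is only meaningful
		 * in a foreign address space -- the corruption fixed
		 * here.
		 */
		struct eth_dev *eth_dev;
		/*
		 * GOOD: points at data that itself lives in shared
		 * memory, so the pointer is valid in every process
		 * (DPDK maps shared memory at the same address in
		 * primary and secondary).
		 */
		struct dev_data *dev_data;
	};

	static uint16_t
	nb_rx_queues(const struct shared_adapter *ad)
	{
		return ad->dev_data->nb_rx_queues; /* safe everywhere */
	}
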
 drivers/net/ice/ice_dcf_parent.c      |  1 -
 drivers/net/ice/ice_ethdev.c          | 13 ++++++-------
 drivers/net/ice/ice_ethdev.h          |  3 ---
 drivers/net/ice/ice_fdir_filter.c     |  6 +++---
 drivers/net/ice/ice_rxtx.c            | 24 +++++++++---------------
 drivers/net/ice/ice_rxtx.h            |  4 ++--
 drivers/net/ice/ice_rxtx_vec_avx2.c   |  2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c |  2 +-
 drivers/net/ice/ice_rxtx_vec_common.h |  2 +-
 drivers/net/ice/ice_rxtx_vec_sse.c    |  2 +-
 drivers/net/ice/ice_switch_filter.c   |  6 +++---
 11 files changed, 27 insertions(+), 38 deletions(-)
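
For review, the two replacement patterns the diff applies can be
summarized in a short sketch (illustrative only; rte_eth_devices and
port_id are real DPDK symbols, declared by the ethdev headers of this
release though driver-internal in later ones, while struct adapter
below is a hypothetical stand-in for ice_adapter after this patch):

	#include <rte_ethdev.h>

	/*
	 * Control path: rebuild the per-process rte_eth_dev pointer
	 * from the process-independent port id kept in the shared
	 * rte_eth_dev_data, instead of caching the pointer itself.
	 */
	static inline struct rte_eth_dev *
	dev_from_shared_data(struct rte_eth_dev_data *dev_data)
	{
		return &rte_eth_devices[dev_data->port_id];
	}

	/*
	 * Data path: avoid rte_eth_devices entirely and read the
	 * shared rte_eth_dev_data directly.
	 */
	struct adapter { struct rte_eth_dev_data *dev_data; };

	static inline uint64_t
	rx_offloads(const struct adapter *ad)
	{
		return ad->dev_data->dev_conf.rxmode.offloads;
	}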

diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 1d7aa8bc87..19420a0f58 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -408,7 +408,6 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
 	const struct rte_ether_addr *mac;
 	int err;
 
-	parent_adapter->eth_dev = eth_dev;
 	parent_adapter->pf.adapter = parent_adapter;
 	parent_adapter->pf.dev_data = eth_dev->data;
 	/* create a dummy main_vsi */
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 5fd5f99b6f..6398e310aa 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2055,7 +2055,6 @@ ice_dev_init(struct rte_eth_dev *dev)
 	intr_handle = &pci_dev->intr_handle;
 
 	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-	pf->adapter->eth_dev = dev;
 	pf->dev_data = dev->data;
 	hw->back = pf->adapter;
 	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
@@ -2218,7 +2217,7 @@ ice_release_vsi(struct ice_vsi *vsi)
 void
 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
 {
-	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
@@ -2994,7 +2993,7 @@ static int ice_init_rss(struct ice_pf *pf)
 {
 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
 	struct ice_vsi *vsi = pf->main_vsi;
-	struct rte_eth_dev *dev = pf->adapter->eth_dev;
+	struct rte_eth_dev_data *dev_data = pf->dev_data;
 	struct ice_aq_get_set_rss_lut_params lut_params;
 	struct rte_eth_rss_conf *rss_conf;
 	struct ice_aqc_get_set_rss_keys key;
@@ -3003,8 +3002,8 @@ static int ice_init_rss(struct ice_pf *pf)
 	bool is_safe_mode = pf->adapter->is_safe_mode;
 	uint32_t reg;
 
-	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
-	nb_q = dev->data->nb_rx_queues;
+	rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
+	nb_q = dev_data->nb_rx_queues;
 	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
 	vsi->rss_lut_size = pf->hash_lut_size;
 
@@ -3138,7 +3137,7 @@ __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
 void
 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
 {
-	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
@@ -3191,7 +3190,7 @@ ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
 void
 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
 {
-	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index aebfd1b0b7..7f3c26fb6f 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -475,7 +475,6 @@ struct ice_devargs {
 struct ice_adapter {
 	/* Common for both PF and VF */
 	struct ice_hw hw;
-	struct rte_eth_dev *eth_dev;
 	struct ice_pf pf;
 	bool rx_bulk_alloc_allowed;
 	bool rx_vec_allowed;
@@ -526,8 +525,6 @@ struct ice_vsi_vlan_pvid_info {
 	(&(((struct ice_vsi *)vsi)->adapter->hw))
 #define ICE_VSI_TO_PF(vsi) \
 	(&(((struct ice_vsi *)vsi)->adapter->pf))
-#define ICE_VSI_TO_ETH_DEV(vsi) \
-	(((struct ice_vsi *)vsi)->adapter->eth_dev)
 
 /* ICE_PF_TO */
 #define ICE_PF_TO_HW(pf) \
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 092c704503..5e07eb24ae 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -354,7 +354,7 @@ ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
 static int
 ice_fdir_init_filter_list(struct ice_pf *pf)
 {
-	struct rte_eth_dev *dev = pf->adapter->eth_dev;
+	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
 	struct ice_fdir_info *fdir_info = &pf->fdir;
 	char fdir_hash_name[RTE_HASH_NAMESIZE];
 	int ret;
@@ -416,7 +416,7 @@ ice_fdir_release_filter_list(struct ice_pf *pf)
 static int
 ice_fdir_setup(struct ice_pf *pf)
 {
-	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+	struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
 	const struct rte_memzone *mz = NULL;
 	char z_name[RTE_MEMZONE_NAMESIZE];
@@ -604,7 +604,7 @@ ice_fdir_prof_rm_all(struct ice_pf *pf)
 static void
 ice_fdir_teardown(struct ice_pf *pf)
 {
-	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+	struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
 	struct ice_vsi *vsi;
 	int err;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index f4f6f48d78..fc9bb5a3e7 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -255,11 +255,11 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
-	struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
+	struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
 	struct ice_rlan_ctx rx_ctx;
 	enum ice_status err;
 	uint16_t buf_size, len;
-	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
 	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
 	uint32_t regval;
 
@@ -270,7 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
 	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
 	rxq->max_pkt_len = RTE_MIN(len,
-				   dev->data->dev_conf.rxmode.max_rx_pkt_len);
+				   dev_data->dev_conf.rxmode.max_rx_pkt_len);
 
 	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
@@ -366,7 +366,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 
 	/* Check if scattered RX needs to be used. */
 	if (rxq->max_pkt_len > buf_size)
-		dev->data->scattered_rx = 1;
+		dev_data->scattered_rx = 1;
 
 	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
 
@@ -1675,7 +1675,6 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
 	uint16_t nb_rx = 0;
-	struct rte_eth_dev *dev;
 
 	if (!nb_pkts)
 		return 0;
@@ -1692,8 +1691,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (ice_rx_alloc_bufs(rxq) != 0) {
 			uint16_t i, j;
 
-			dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
-			dev->data->rx_mbuf_alloc_failed +=
+			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
 				rxq->rx_free_thresh;
 			PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
 				   "port_id=%u, queue_id=%u",
@@ -1766,7 +1764,6 @@ ice_recv_scattered_pkts(void *rx_queue,
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-	struct rte_eth_dev *dev;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -1779,8 +1776,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 		/* allocate mbuf */
 		nmb = rte_mbuf_raw_alloc(rxq->mp);
 		if (unlikely(!nmb)) {
-			dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
-			dev->data->rx_mbuf_alloc_failed++;
+			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
 			break;
 		}
 		rxd = *rxdp; /* copy descriptor in ring to temp variable*/
@@ -2104,7 +2100,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
 		return -EINVAL;
 	}
 
-	dev = pf->adapter->eth_dev;
+	dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
 
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("ice fdir tx queue",
@@ -2162,7 +2158,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
 		return -EINVAL;
 	}
 
-	dev = pf->adapter->eth_dev;
+	dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
 
 	/* Allocate the RX queue data structure. */
 	rxq = rte_zmalloc_socket("ice fdir rx queue",
@@ -2231,7 +2227,6 @@ ice_recv_pkts(void *rx_queue,
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-	struct rte_eth_dev *dev;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -2244,8 +2239,7 @@ ice_recv_pkts(void *rx_queue,
 		/* allocate mbuf */
 		nmb = rte_mbuf_raw_alloc(rxq->mp);
 		if (unlikely(!nmb)) {
-			dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
-			dev->data->rx_mbuf_alloc_failed++;
+			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
 			break;
 		}
 		rxd = *rxdp; /* copy descriptor in ring to temp variable*/
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index b29387ca0f..86b6f3dcc0 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -277,8 +277,8 @@ int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 
 #define FDIR_PARSING_ENABLE_PER_QUEUE(ad, on) do { \
 	int i; \
-	for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
-		struct ice_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+	for (i = 0; i < (ad)->pf.dev_data->nb_rx_queues; i++) { \
+		struct ice_rx_queue *rxq = (ad)->pf.dev_data->rx_queues[i]; \
 		if (!rxq) \
 			continue; \
 		rxq->fdir_enabled = on; \
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 8d4bd6df1b..165bc1bb9d 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -466,7 +466,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
 				DEV_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index ad6c69da9b..5bba9887d2 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -583,7 +583,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			 * needs to load 2nd 16B of each desc for RSS hash parsing,
 			 * will cause performance drop to get into this context.
 			 */
-			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
 					DEV_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 6e8d7a6fc5..2d8ef7dc8a 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -195,7 +195,7 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
 	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
 
 #ifdef CC_AVX512_SUPPORT
-	struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
+	struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];
 
 	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
 	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 6029cc2d99..673e44a243 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -478,7 +478,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
 				DEV_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4a64db5385..eeed386c63 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1585,7 +1585,7 @@ ice_switch_parse_action(struct ice_pf *pf,
 		struct ice_adv_rule_info *rule_info)
 {
 	struct ice_vsi *vsi = pf->main_vsi;
-	struct rte_eth_dev *dev = pf->adapter->eth_dev;
+	struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
 	const struct rte_flow_action_queue *act_q;
 	const struct rte_flow_action_rss *act_qgrop;
 	uint16_t base_queue, i;
@@ -1616,7 +1616,7 @@ ice_switch_parse_action(struct ice_pf *pf,
 				goto error;
 			if ((act_qgrop->queue[0] +
 				act_qgrop->queue_num) >
-				dev->data->nb_rx_queues)
+				dev_data->nb_rx_queues)
 				goto error1;
 			for (i = 0; i < act_qgrop->queue_num - 1; i++)
 				if (act_qgrop->queue[i + 1] !=
@@ -1627,7 +1627,7 @@ ice_switch_parse_action(struct ice_pf *pf,
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
 			act_q = action->conf;
-			if (act_q->index >= dev->data->nb_rx_queues)
+			if (act_q->index >= dev_data->nb_rx_queues)
 				goto error;
 			rule_info->sw_act.fltr_act =
 				ICE_FWD_TO_Q;
-- 
2.26.2

