From mboxrd@z Thu Jan  1 00:00:00 1970
From: dapengx.yu@intel.com
To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
Cc: dev@dpdk.org, qi.z.zhang@intel.com, ferruh.yigit@intel.com,
	Dapeng Yu, stable@dpdk.org
Date: Sat, 9 Oct 2021 11:25:19 +0800
Message-Id: <20211009032519.335016-1-dapengx.yu@intel.com>
X-Mailer: git-send-email 2.27.0
In-Reply-To: <20210928033753.1955674-1-dapengx.yu@intel.com>
References: <20210928033753.1955674-1-dapengx.yu@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-stable] [PATCH v2] net/iavf: fix multi-process shared data
List-Id: patches for DPDK stable branches

From: Dapeng Yu <dapengx.yu@intel.com>

When the iavf_adapter instance is not completely initialized in the
primary process, the secondary process accesses its "rte_eth_dev"
member and crashes.

This patch replaces eth_dev with eth_dev_data in iavf_adapter, so
that only data in shared memory is referenced.

Fixes: f978c1c9b3b5 ("net/iavf: add RSS hash parsing in AVX path")
Fixes: 9c9aa0040344 ("net/iavf: add offload path for Rx AVX512 flex descriptor")
Fixes: 63660ea3ee0b ("net/iavf: add RSS hash parsing in SSE path")
Cc: stable@dpdk.org

Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
---
V2:
* Remove access to rte_eth_devices
---
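Background for reviewers: in DPDK multi-process, each process has its
own rte_eth_devices[] array, so a "struct rte_eth_dev *" kept in shared
adapter memory is only meaningful inside the process that stored it,
while "struct rte_eth_dev_data" is allocated from shared memory and
stays valid in secondary processes. The stand-alone sketch below
illustrates that failure mode; the demo_* names are hypothetical
stand-ins for the real ethdev structures, not DPDK APIs:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for rte_eth_dev_data: allocated from shared memory. */
struct demo_eth_dev_data {
	uint16_t nb_rx_queues;
};

/* Stand-in for rte_eth_dev: a per-process object. */
struct demo_eth_dev {
	struct demo_eth_dev_data *data;
};

/* Stand-in for iavf_adapter: placed in shared memory by the primary. */
struct demo_adapter {
	struct demo_eth_dev *eth_dev;       /* per-process address: unsafe */
	struct demo_eth_dev_data *dev_data; /* shared-memory address: safe */
};

int main(void)
{
	struct demo_eth_dev_data data = { .nb_rx_queues = 4 };
	struct demo_eth_dev dev = { .data = &data };
	struct demo_adapter ad = { .eth_dev = &dev, .dev_data = &data };

	/*
	 * Within one process both chains reach the same object.  In a
	 * secondary process, ad.eth_dev still holds the primary's
	 * address of a per-process object, so dereferencing it is
	 * undefined behaviour; ad.dev_data keeps pointing into shared
	 * memory, which is what this patch relies on.
	 */
	printf("via eth_dev:  %u rx queues\n", ad.eth_dev->data->nb_rx_queues);
	printf("via dev_data: %u rx queues\n", ad.dev_data->nb_rx_queues);
	return 0;
}

Built alone, both prints work because there is only one process; across
a primary/secondary pair, only the dev_data chain is safe to follow.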
 drivers/net/iavf/iavf.h                 |  9 +++---
 drivers/net/iavf/iavf_ethdev.c          | 12 ++++----
 drivers/net/iavf/iavf_fdir.c            |  4 +--
 drivers/net/iavf/iavf_hash.c            |  2 +-
 drivers/net/iavf/iavf_rxtx.h            |  4 +--
 drivers/net/iavf/iavf_rxtx_vec_avx2.c   | 13 +++++----
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 12 ++++----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    |  7 +++--
 drivers/net/iavf/iavf_vchnl.c           | 37 ++++++++++++-------------
 9 files changed, 53 insertions(+), 47 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 940d4f79ec..5de43cf9af 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -228,6 +228,8 @@ struct iavf_info {
 	struct virtchnl_qos_cap_list *qos_cap;
 	struct iavf_qtc_map *qtc_map;
 	struct iavf_tm_conf tm_conf;
+
+	struct rte_eth_dev *eth_dev;
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -256,7 +258,7 @@ struct iavf_devargs {
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
-	struct rte_eth_dev *eth_dev;
+	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
 
 	bool rx_bulk_alloc_allowed;
@@ -282,8 +284,6 @@ struct iavf_adapter {
 	(&(((struct iavf_vsi *)vsi)->adapter->hw))
 #define IAVF_VSI_TO_VF(vsi) \
 	(&(((struct iavf_vsi *)vsi)->adapter->vf))
-#define IAVF_VSI_TO_ETH_DEV(vsi) \
-	(((struct iavf_vsi *)vsi)->adapter->eth_dev)
 
 static inline void
 iavf_init_adminq_parameter(struct iavf_hw *hw)
@@ -397,7 +397,8 @@ int iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add);
 int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
 			uint32_t mc_addrs_num, bool add);
-int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
+int iavf_request_queues(struct rte_eth_dev *dev, struct iavf_adapter *adapter,
+			uint16_t num);
 int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 int iavf_get_qos_cap(struct iavf_adapter *adapter);
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 5a5a7f59e1..717fa9a8e9 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -383,8 +383,8 @@ iavf_init_rss(struct iavf_adapter *adapter)
 	uint16_t i, j, nb_q;
 	int ret;
 
-	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
-	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+	rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
+	nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
 		       vf->max_rss_qregion);
 
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
@@ -438,7 +438,7 @@ iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
 	int ret;
 
-	ret = iavf_request_queues(ad, num);
+	ret = iavf_request_queues(dev, ad, num);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "request queues from PF failed");
 		return ret;
@@ -1388,7 +1388,7 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	int ret;
 
-	adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
+	adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
 
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
@@ -2087,6 +2087,8 @@ iavf_init_vf(struct rte_eth_dev *dev)
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
+	vf->eth_dev = dev;
+
 	err = iavf_parse_devargs(dev);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Failed to parse devargs");
@@ -2352,7 +2354,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	hw->bus.func = pci_dev->addr.function;
 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
 	hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
-	adapter->eth_dev = eth_dev;
+	adapter->dev_data = eth_dev->data;
 	adapter->stopped = 1;
 
 	if (iavf_init_vf(eth_dev) != 0) {
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index ea2b692712..2e5d68f9f9 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -431,7 +431,7 @@ iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
 		}
 	}
 
-	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+	if (rss->queue[rss->queue_num - 1] >= ad->dev_data->nb_rx_queues) {
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ACTION, act,
 				   "Invalid queue region indexes.");
@@ -511,7 +511,7 @@ iavf_fdir_parse_action(struct iavf_adapter *ad,
 			filter_action->act_conf.queue.index = act_q->index;
 
 			if (filter_action->act_conf.queue.index >=
-				ad->eth_dev->data->nb_rx_queues) {
+				ad->dev_data->nb_rx_queues) {
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION,
 					actions, "Invalid queue for FDIR.");
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index e84f58d6f4..1f2d3772d1 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -1365,7 +1365,7 @@ iavf_hash_uninit(struct iavf_adapter *ad)
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF))
 		return;
 
-	rss_conf = &ad->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+	rss_conf = &ad->dev_data->dev_conf.rx_adv_conf.rss_conf;
 	if (iavf_rss_hash_set(ad, rss_conf->rss_hf, false))
 		PMD_DRV_LOG(ERR, "fail to delete default RSS");
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d6..25d93a3561 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -577,8 +577,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
 	int i; \
-	for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
-		struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+	for (i = 0; i < (ad)->dev_data->nb_rx_queues; i++) { \
+		struct iavf_rx_queue *rxq = (ad)->dev_data->rx_queues[i]; \
 		if (!rxq) \
 			continue; \
 		rxq->fdir_enabled = on; \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 96c05d9319..72a4fcab04 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -524,7 +524,10 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 {
 #define IAVF_DESCS_PER_LOOP_AVX 8
 
-	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
+
+	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+	const uint32_t *type_table = adapter->ptype_tbl;
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
 			rxq->mbuf_initializer);
@@ -903,9 +906,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 			 * needs to load 2nd 16B of each desc for RSS hash parsing,
 			 * will cause performance drop to get into this context.
 			 */
-			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH ||
-					rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+			if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
@@ -956,8 +958,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 				(_mm256_castsi128_si256(raw_desc_bh0),
 				 raw_desc_bh1, 1);
 
-			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+			if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
 				/**
 				 * to shift the 32b RSS hash value to the
 				 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index cb0b057b0f..12375d3d80 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -710,8 +710,12 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 					    uint8_t *split_packet,
 					    bool offload)
 {
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
+
+	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+
 #ifdef IAVF_RX_PTYPE_OFFLOAD
-	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+	const uint32_t *type_table = adapter->ptype_tbl;
 #endif
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
@@ -1137,8 +1141,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 			 * needs to load 2nd 16B of each desc for RSS hash parsing,
 			 * will cause performance drop to get into this context.
 			 */
-			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH ||
+			if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
 			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
@@ -1190,8 +1193,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 				(_mm256_castsi128_si256(raw_desc_bh0),
 				 raw_desc_bh1, 1);
 
-			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+			if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
 				/**
 				 * to shift the 32b RSS hash value to the
 				 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e905525..edb54991e2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -644,7 +644,9 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	uint16_t nb_pkts_recd;
 	int pos;
 	uint64_t var;
-	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
+	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+	const uint32_t *ptype_tbl = adapter->ptype_tbl;
 	__m128i crc_adjust = _mm_set_epi16
 				(0, 0, 0,       /* ignore non-length fields */
 				 -rxq->crc_len, /* sub crc on data_len */
@@ -817,8 +819,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+		if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 3275687927..f4fbfdc957 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -72,7 +72,6 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
-	struct rte_eth_dev *dev = adapter->eth_dev;
 	struct iavf_arq_event_info event;
 	enum iavf_aq_result result = IAVF_MSG_NON;
 	enum virtchnl_ops opcode;
@@ -114,7 +113,7 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 				speed = vpe->event_data.link_event.link_speed;
 				vf->link_speed = iavf_convert_link_speed(speed);
 			}
-			iavf_dev_link_update(dev, 0);
+			iavf_dev_link_update(vf->eth_dev, 0);
 			PMD_DRV_LOG(INFO, "Link status update:%s",
 					vf->link_up ? "up" : "down");
 			break;
@@ -690,8 +689,8 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
-	queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
-	queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+	queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
 
 	args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
 	args.in_args = (u8 *)&queue_select;
@@ -718,8 +717,8 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
-	queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
-	queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+	queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
 
 	args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
 	args.in_args = (u8 *)&queue_select;
@@ -789,12 +788,12 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
-		adapter->eth_dev->data->nb_tx_queues;
+		adapter->dev_data->nb_tx_queues;
 
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
-		adapter->eth_dev->data->nb_rx_queues;
+		adapter->dev_data->nb_rx_queues;
 
 	args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
 	args.in_args = (u8 *)queue_select;
@@ -833,12 +832,12 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
-		adapter->eth_dev->data->nb_tx_queues;
+		adapter->dev_data->nb_tx_queues;
 
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
-		adapter->eth_dev->data->nb_rx_queues;
+		adapter->dev_data->nb_rx_queues;
 
 	args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
 	args.in_args = (u8 *)queue_select;
@@ -969,9 +968,9 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		       uint16_t num_queue_pairs, uint16_t index)
 {
 	struct iavf_rx_queue **rxq =
-		(struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
+		(struct iavf_rx_queue **)adapter->dev_data->rx_queues;
 	struct iavf_tx_queue **txq =
-		(struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues;
+		(struct iavf_tx_queue **)adapter->dev_data->tx_queues;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct virtchnl_vsi_queue_config_info *vc_config;
 	struct virtchnl_queue_pair_info *vc_qp;
@@ -995,7 +994,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		vc_qp->txq.queue_id = i;
 
 		/* Virtchnnl configure tx queues by pairs */
-		if (i < adapter->eth_dev->data->nb_tx_queues) {
+		if (i < adapter->dev_data->nb_tx_queues) {
 			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
 			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
 		}
@@ -1004,7 +1003,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		vc_qp->rxq.queue_id = i;
 		vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
 
-		if (i >= adapter->eth_dev->data->nb_rx_queues)
+		if (i >= adapter->dev_data->nb_rx_queues)
 			continue;
 
 		/* Virtchnnl configure rx queues by pairs */
@@ -1073,7 +1072,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 		return -ENOMEM;
 
 	map_info->num_vectors = vf->nb_msix;
-	for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+	for (i = 0; i < adapter->dev_data->nb_rx_queues; i++) {
 		vecmap =
 			&map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
 		vecmap->vsi_id = vf->vsi_res->vsi_id;
@@ -1152,7 +1151,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		j = 0;
 		len = sizeof(struct virtchnl_ether_addr_list);
 		for (i = begin; i < IAVF_NUM_MACADDR_MAX; i++, next_begin++) {
-			addr = &adapter->eth_dev->data->mac_addrs[i];
+			addr = &adapter->dev_data->mac_addrs[i];
 			if (rte_is_zero_ether_addr(addr))
 				continue;
 			len += sizeof(struct virtchnl_ether_addr);
@@ -1169,7 +1168,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		}
 
 		for (i = begin; i < next_begin; i++) {
-			addr = &adapter->eth_dev->data->mac_addrs[i];
+			addr = &adapter->dev_data->mac_addrs[i];
 			if (rte_is_zero_ether_addr(addr))
 				continue;
 			rte_memcpy(list->list[j].addr, addr->addr_bytes,
@@ -1653,9 +1652,9 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 }
 
 int
-iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
+iavf_request_queues(struct rte_eth_dev *dev, struct iavf_adapter *adapter,
+		uint16_t num)
 {
-	struct rte_eth_dev *dev = adapter->eth_dev;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct virtchnl_vf_res_request vfres;
-- 
2.27.0
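
A possible way to exercise the fix (an untested, illustrative sketch,
not part of the patch): run a secondary process that polls a queue
served by one of the vector Rx paths touched above, assuming a primary
process has already configured and started port 0:

#include <stdlib.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

int main(int argc, char **argv)
{
	struct rte_mbuf *pkts[32];

	/* Launch with --proc-type=secondary so EAL attaches to the
	 * primary's shared memory instead of creating its own. */
	if (rte_eal_init(argc, argv) < 0)
		return EXIT_FAILURE;

	/* Before this patch, the flex-descriptor vector Rx paths could
	 * fault here while reading rxq->vsi->adapter->eth_dev->data;
	 * with dev_data the same read stays inside shared memory. */
	for (;;) {
		uint16_t nb = rte_eth_rx_burst(0, 0, pkts, 32);
		rte_pktmbuf_free_bulk(pkts, nb);
	}
	return 0;
}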