From: Jan Blunck
To: dev@dpdk.org
Cc: shreyansh.jain@nxp.com, david.marchand@6wind.com
Date: Mon, 21 Nov 2016 17:55:17 +0100
Message-Id: <1479747322-5774-3-git-send-email-jblunck@infradead.org>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1479747322-5774-1-git-send-email-jblunck@infradead.org>
References: <1479747322-5774-1-git-send-email-jblunck@infradead.org>
Subject: [dpdk-dev] [PATCH v2 3/8] drivers: Use ETH_DEV_PCI_DEV() helper

The drivers should not access rte_eth_dev->pci_dev directly but should use a
macro instead. This is in preparation for replacing the pci_dev with a
struct rte_device member in the future.
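Note for readers of the archive: the ETH_DEV_PCI_DEV() helper itself is added
by an earlier patch of this series and is not part of this diff. A minimal
sketch of the assumed definition and of the call-site pattern this patch
converts to follows; the forwarding definition and the example function name
are assumptions for illustration only, not taken from this patch:

    /* Assumed minimal definition: for now the helper simply forwards to the
     * pci_dev member, so it can later be reimplemented on top of struct
     * rte_device without touching the drivers again. */
    #define ETH_DEV_PCI_DEV(eth_dev)  ((eth_dev)->pci_dev)

    /* Illustrative (hypothetical) call site following the pattern used
     * throughout this patch: */
    static inline struct rte_intr_handle *
    example_intr_handle(struct rte_eth_dev *dev)
    {
            struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev);

            return &pci_dev->intr_handle;
    }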
Signed-off-by: Jan Blunck
---
 drivers/net/bnxt/bnxt_ethdev.c       | 19 ++++++-----
 drivers/net/bnxt/bnxt_ring.c         | 11 +++---
 drivers/net/cxgbe/cxgbe_ethdev.c     |  2 +-
 drivers/net/e1000/em_ethdev.c        | 20 ++++++-----
 drivers/net/e1000/igb_ethdev.c       | 50 +++++++++++++++------------
 drivers/net/e1000/igb_pf.c           |  3 +-
 drivers/net/ena/ena_ethdev.c         |  2 +-
 drivers/net/enic/enic_ethdev.c       |  2 +-
 drivers/net/fm10k/fm10k_ethdev.c     | 49 ++++++++++++++-------------
 drivers/net/i40e/i40e_ethdev.c       | 44 ++++++++++++------------
 drivers/net/i40e/i40e_ethdev.h       |  4 +++
 drivers/net/i40e/i40e_ethdev_vf.c    | 38 ++++++++++-----------
 drivers/net/ixgbe/ixgbe_ethdev.c     | 65 +++++++++++++++++++++---------------
 drivers/net/ixgbe/ixgbe_pf.c         |  2 +-
 drivers/net/qede/qede_ethdev.c       | 17 +++++-----
 drivers/net/vmxnet3/vmxnet3_ethdev.c |  4 +--
 16 files changed, 185 insertions(+), 147 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 035fe07..cd50f11 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -743,6 +743,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(eth_dev); /* Retrieve from the default VNIC */ if (!vnic) @@ -759,7 +760,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, /* EW - need to revisit here copying from u64 to u16 */ memcpy(reta_conf, vnic->rss_table, reta_size); - if (rte_intr_allow_others(&eth_dev->pci_dev->intr_handle)) { + if (rte_intr_allow_others(&pci_dev->intr_handle)) { if (eth_dev->data->dev_conf.intr_conf.lsc != 0) bnxt_dev_lsc_intr_setup(eth_dev); } @@ -1011,9 +1012,10 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev) { int rc; struct bnxt *bp = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(eth_dev); /* enable device (incl.
PCI PM wakeup), and bus-mastering */ - if (!eth_dev->pci_dev->mem_resource[0].addr) { + if (!pci_dev->mem_resource[0].addr) { RTE_LOG(ERR, PMD, "Cannot find PCI device base address, aborting\n"); rc = -ENODEV; @@ -1021,9 +1023,9 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev) } bp->eth_dev = eth_dev; - bp->pdev = eth_dev->pci_dev; + bp->pdev = pci_dev; - bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr; + bp->bar0 = (void *)pci_dev->mem_resource[0].addr; if (!bp->bar0) { RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n"); rc = -ENOMEM; @@ -1043,6 +1045,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev) static int bnxt_dev_init(struct rte_eth_dev *eth_dev) { + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(eth_dev); static int version_printed; struct bnxt *bp; int rc; @@ -1050,10 +1053,10 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) if (version_printed++ == 0) RTE_LOG(INFO, PMD, "%s", bnxt_version); - rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev); + rte_eth_copy_pci_info(eth_dev, pci_dev); bp = eth_dev->data->dev_private; - if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id)) + if (bnxt_vf_pciid(pci_dev->id.device_id)) bp->flags |= BNXT_FLAG_VF; rc = bnxt_init_board(eth_dev); @@ -1121,8 +1124,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) RTE_LOG(INFO, PMD, DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n", - eth_dev->pci_dev->mem_resource[0].phys_addr, - eth_dev->pci_dev->mem_resource[0].addr); + pci_dev->mem_resource[0].phys_addr, + pci_dev->mem_resource[0].addr); bp->dev_stopped = 0; diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c index 3f81ffc..6793d75 100644 --- a/drivers/net/bnxt/bnxt_ring.c +++ b/drivers/net/bnxt/bnxt_ring.c @@ -209,6 +209,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, */ int bnxt_alloc_hwrm_rings(struct bnxt *bp) { + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(bp->eth_dev); unsigned int i; int rc = 0; @@ -223,7 +224,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) if (rc) goto err_out; cpr->cp_doorbell = - (char *)bp->eth_dev->pci_dev->mem_resource[2].addr; + (char *)pci_dev->mem_resource[2].addr; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id; } @@ -243,7 +244,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) if (rc) goto err_out; cpr->cp_doorbell = - (char *)bp->eth_dev->pci_dev->mem_resource[2].addr + + (char *)pci_dev->mem_resource[2].addr + idx * 0x80; bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); @@ -256,7 +257,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) goto err_out; rxr->rx_prod = 0; rxr->rx_doorbell = - (char *)bp->eth_dev->pci_dev->mem_resource[2].addr + + (char *)pci_dev->mem_resource[2].addr + idx * 0x80; bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id; B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); @@ -284,7 +285,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) goto err_out; cpr->cp_doorbell = - (char *)bp->eth_dev->pci_dev->mem_resource[2].addr + + (char *)pci_dev->mem_resource[2].addr + idx * 0x80; bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); @@ -297,7 +298,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) goto err_out; txr->tx_doorbell = - (char *)bp->eth_dev->pci_dev->mem_resource[2].addr + + (char *)pci_dev->mem_resource[2].addr + idx * 0x80; } diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c index b7f28eb..8bfdda8 100644 --- a/drivers/net/cxgbe/cxgbe_ethdev.c +++ 
b/drivers/net/cxgbe/cxgbe_ethdev.c @@ -1005,7 +1005,7 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id); adapter = rte_zmalloc(name, sizeof(*adapter), 0); diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c index aee3d34..7f2f521 100644 --- a/drivers/net/e1000/em_ethdev.c +++ b/drivers/net/e1000/em_ethdev.c @@ -295,7 +295,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev) struct e1000_vfta * shadow_vfta = E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); eth_dev->dev_ops = &eth_em_ops; eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts; @@ -369,7 +369,7 @@ eth_em_dev_uninit(struct rte_eth_dev *eth_dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return -EPERM; - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); if (adapter->stopped == 0) eth_em_close(eth_dev); @@ -556,7 +556,8 @@ eth_em_start(struct rte_eth_dev *dev) E1000_DEV_PRIVATE(dev->data->dev_private); struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int ret, mask; uint32_t intr_vector = 0; uint32_t *speeds; @@ -738,7 +739,8 @@ eth_em_stop(struct rte_eth_dev *dev) { struct rte_eth_link link; struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; em_rxq_intr_disable(hw); em_lsc_intr_disable(hw); @@ -999,9 +1001,10 @@ static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); em_rxq_intr_enable(hw); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(&pci_dev->intr_handle); return 0; } @@ -1542,6 +1545,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev) E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_interrupt *intr = E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); uint32_t tctl, rctl; struct rte_eth_link link; int ret; @@ -1550,7 +1554,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev) return -1; intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(&pci_dev->intr_handle); /* set get_link_status to check register later */ hw->mac.get_link_status = 1; @@ -1571,8 +1575,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev) PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id); } PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", - dev->pci_dev->addr.domain, dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, dev->pci_dev->addr.function); + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); tctl = E1000_READ_REG(hw, E1000_TCTL); rctl = E1000_READ_REG(hw, E1000_RCTL); diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index 2fddf0c..b25c66e 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@
-672,11 +672,11 @@ igb_identify_hardware(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - hw->vendor_id = dev->pci_dev->id.vendor_id; - hw->device_id = dev->pci_dev->id.device_id; - hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id; - hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); + hw->vendor_id = pci_dev->id.vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; e1000_set_mac_type(hw); @@ -755,7 +755,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev) uint32_t ctrl_ext; - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); eth_dev->dev_ops = &eth_igb_ops; eth_dev->rx_pkt_burst = &eth_igb_recv_pkts; @@ -918,7 +918,7 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev) return -EPERM; hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); if (adapter->stopped == 0) eth_igb_close(eth_dev); @@ -973,7 +973,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) return 0; } - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); rte_eth_copy_pci_info(eth_dev, pci_dev); @@ -1050,7 +1050,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev) { struct e1000_adapter *adapter = E1000_DEV_PRIVATE(eth_dev->data->dev_private); - struct rte_pci_device *pci_dev = eth_dev->pci_dev; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(eth_dev); PMD_INIT_FUNC_TRACE(); @@ -1217,7 +1217,8 @@ eth_igb_start(struct rte_eth_dev *dev) E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_adapter *adapter = E1000_DEV_PRIVATE(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = + &ETH_DEV_PCI_DEV(dev)->intr_handle; int ret, mask; uint32_t intr_vector = 0; uint32_t ctrl_ext; @@ -1429,7 +1430,8 @@ eth_igb_stop(struct rte_eth_dev *dev) struct e1000_flex_filter *p_flex; struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next; struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = + &ETH_DEV_PCI_DEV(dev)->intr_handle; igb_intr_disable(hw); @@ -1549,7 +1551,7 @@ eth_igb_close(struct rte_eth_dev *dev) igb_dev_free_queues(dev); - pci_dev = dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(dev); if (pci_dev->intr_handle.intr_vec) { rte_free(pci_dev->intr_handle.intr_vec); pci_dev->intr_handle.intr_vec = NULL; @@ -2639,6 +2641,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev) E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_interrupt *intr = E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); uint32_t tctl, rctl; struct rte_eth_link link; int ret; @@ -2649,7 +2652,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev) } igb_intr_enable(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(&pci_dev->intr_handle); if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) { intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; @@ -2677,10 +2680,10 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev) } PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", - dev->pci_dev->addr.domain, - dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, - dev->pci_dev->addr.function); + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, +
pci_dev->addr.function); tctl = E1000_READ_REG(hw, E1000_TCTL); rctl = E1000_READ_REG(hw, E1000_RCTL); if (link.link_status) { @@ -2770,7 +2773,7 @@ eth_igbvf_interrupt_action(struct rte_eth_dev *dev) } igbvf_intr_enable(dev); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(&ETH_DEV_PCI_DEV(dev)->intr_handle); return 0; } @@ -3056,7 +3059,8 @@ igbvf_dev_start(struct rte_eth_dev *dev) struct e1000_adapter *adapter = E1000_DEV_PRIVATE(dev->data->dev_private); int ret; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = + &ETH_DEV_PCI_DEV(dev)->intr_handle; uint32_t intr_vector = 0; PMD_INIT_FUNC_TRACE(); @@ -3110,7 +3114,8 @@ igbvf_dev_start(struct rte_eth_dev *dev) static void igbvf_dev_stop(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = + &ETH_DEV_PCI_DEV(dev)->intr_handle; PMD_INIT_FUNC_TRACE(); @@ -5102,7 +5107,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); E1000_WRITE_FLUSH(hw); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(&ETH_DEV_PCI_DEV(dev)->intr_handle); return 0; } @@ -5167,7 +5172,8 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev) uint32_t base = E1000_MISC_VEC_ID; uint32_t misc_shift = 0; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = + &ETH_DEV_PCI_DEV(dev)->intr_handle; /* won't configure msix register if no mapping is done * between intr vector and event fd diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c index 5845bc2..6a72ee1 100644 --- a/drivers/net/e1000/igb_pf.c +++ b/drivers/net/e1000/igb_pf.c @@ -57,7 +57,8 @@ static inline uint16_t dev_num_vf(struct rte_eth_dev *eth_dev) { - return eth_dev->pci_dev->max_vfs; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(eth_dev); + return pci_dev->max_vfs; } static inline diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index ab9a178..c17d969 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -1278,7 +1278,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); adapter->pdev = pci_dev; PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n", diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c index 2b154ec..553a88e 100644 --- a/drivers/net/enic/enic_ethdev.c +++ b/drivers/net/enic/enic_ethdev.c @@ -621,7 +621,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = &enic_recv_pkts; eth_dev->tx_pkt_burst = &enic_xmit_pkts; - pdev = eth_dev->pci_dev; + pdev = ETH_DEV_PCI_DEV(eth_dev); rte_eth_copy_pci_info(eth_dev, pdev); enic->pdev = pdev; addr = &pdev->addr; diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c index 923690c..e1250f6 100644 --- a/drivers/net/fm10k/fm10k_ethdev.c +++ b/drivers/net/fm10k/fm10k_ethdev.c @@ -59,7 +59,8 @@ #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1) /* default 1:1 map from queue ID to interrupt vector ID */ -#define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id]) +#define D2IH(dev) (&ETH_DEV_PCI_DEV(dev)->intr_handle) +#define Q2V(dev, queue_id) (D2IH(dev)->intr_vec[queue_id]) /* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */ #define MAX_LPORT_NUM 128 @@ -711,7 +712,7
@@ fm10k_dev_rx_init(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct fm10k_macvlan_filter_info *macvlan; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = D2IH(dev); int i, ret; struct fm10k_rx_queue *rxq; uint64_t base_addr; @@ -1171,7 +1172,7 @@ static void fm10k_dev_stop(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = D2IH(dev); int i; PMD_INIT_FUNC_TRACE(); @@ -1387,6 +1388,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); PMD_INIT_FUNC_TRACE(); @@ -1396,7 +1398,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, dev_info->max_tx_queues = hw->mac.max_queues; dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM; dev_info->max_hash_mac_addrs = 0; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; dev_info->vmdq_pool_base = 0; dev_info->vmdq_queue_base = 0; dev_info->max_vmdq_pools = ETH_32_POOLS; @@ -2341,7 +2343,7 @@ fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) else FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)), FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(D2IH(dev)); return 0; } @@ -2364,7 +2366,7 @@ static int fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = D2IH(dev); uint32_t intr_vector, vec; uint16_t queue_id; int result = 0; @@ -2380,7 +2382,7 @@ fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) intr_vector = dev->data->nb_rx_queues; /* disable interrupt first */ - rte_intr_disable(&dev->pci_dev->intr_handle); + rte_intr_disable(intr_handle); if (hw->mac.type == fm10k_mac_pf) fm10k_dev_disable_intr_pf(dev); else @@ -2415,7 +2417,7 @@ fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) fm10k_dev_enable_intr_pf(dev); else fm10k_dev_enable_intr_vf(dev); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(intr_handle); hw->mac.ops.update_int_moderator(hw); return result; } @@ -2581,7 +2583,7 @@ fm10k_dev_interrupt_handler_pf( FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR); /* Re-enable interrupt from host side */ - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(D2IH(dev)); } /** @@ -2615,7 +2617,7 @@ fm10k_dev_interrupt_handler_vf( FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR); /* Re-enable interrupt from host side */ - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(D2IH(dev)); } /* Mailbox message handler in VF */ @@ -2827,6 +2829,7 @@ static int eth_fm10k_dev_init(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); int diag, i; struct fm10k_macvlan_filter_info *macvlan; @@ -2840,18 +2843,18 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - rte_eth_copy_pci_info(dev, dev->pci_dev); + rte_eth_copy_pci_info(dev, pci_dev); macvlan = 
FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); memset(macvlan, 0, sizeof(*macvlan)); /* Vendor and Device ID need to be set before init of shared code */ memset(hw, 0, sizeof(*hw)); - hw->device_id = dev->pci_dev->id.device_id; - hw->vendor_id = dev->pci_dev->id.vendor_id; - hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id; - hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; hw->revision_id = 0; - hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; if (hw->hw_addr == NULL) { PMD_INIT_LOG(ERR, "Bad mem resource." " Try to blacklist unused devices."); @@ -2921,20 +2924,20 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) /*PF/VF has different interrupt handling mechanism */ if (hw->mac.type == fm10k_mac_pf) { /* register callback func to eal lib */ - rte_intr_callback_register(&(dev->pci_dev->intr_handle), + rte_intr_callback_register(D2IH(dev), fm10k_dev_interrupt_handler_pf, (void *)dev); /* enable MISC interrupt */ fm10k_dev_enable_intr_pf(dev); } else { /* VF */ - rte_intr_callback_register(&(dev->pci_dev->intr_handle), + rte_intr_callback_register(D2IH(dev), fm10k_dev_interrupt_handler_vf, (void *)dev); fm10k_dev_enable_intr_vf(dev); } /* Enable intr after callback registered */ - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(D2IH(dev)); hw->mac.ops.update_int_moderator(hw); @@ -3004,7 +3007,7 @@ static int eth_fm10k_dev_uninit(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); - + struct rte_intr_handle *intr_handle = D2IH(dev); PMD_INIT_FUNC_TRACE(); /* only uninitialize in the primary process */ @@ -3019,7 +3022,7 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev) dev->tx_pkt_burst = NULL; /* disable uio/vfio intr */ - rte_intr_disable(&(dev->pci_dev->intr_handle)); + rte_intr_disable(intr_handle); /*PF/VF has different interrupt handling mechanism */ if (hw->mac.type == fm10k_mac_pf) { @@ -3027,13 +3030,13 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev) fm10k_dev_disable_intr_pf(dev); /* unregister callback func to eal lib */ - rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + rte_intr_callback_unregister(intr_handle, fm10k_dev_interrupt_handler_pf, (void *)dev); } else { /* disable interrupt */ fm10k_dev_disable_intr_vf(dev); - rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + rte_intr_callback_unregister(intr_handle, fm10k_dev_interrupt_handler_vf, (void *)dev); } diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 67778ba..8a63a8c 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -907,7 +907,7 @@ is_floating_veb_supported(struct rte_devargs *devargs) static void config_floating_veb(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = dev->pci_dev; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -952,7 +952,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) i40e_set_tx_function(dev); return 0; } - pci_dev = dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(dev); rte_eth_copy_pci_info(dev, pci_dev); @@ -1215,7 +1215,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) return 0; hw = 
I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(dev); if (hw->adapter_stopped == 0) i40e_dev_close(dev); @@ -1335,7 +1335,7 @@ void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_vect = vsi->msix_intr; uint16_t i; @@ -1448,7 +1448,7 @@ void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_vect = vsi->msix_intr; uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); @@ -1519,7 +1519,7 @@ static void i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t interval = i40e_calc_itr_interval(\ RTE_LIBRTE_I40E_ITR_INTERVAL); @@ -1550,7 +1550,7 @@ static void i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_intr, i; @@ -1675,7 +1675,7 @@ i40e_dev_start(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; int ret, i; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); uint32_t intr_vector = 0; hw->adapter_stopped = 0; @@ -1808,7 +1808,7 @@ i40e_dev_stop(struct rte_eth_dev *dev) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; struct i40e_mirror_rule *p_mirror; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); int i; /* Disable all queues */ @@ -1870,7 +1870,7 @@ i40e_dev_close(struct rte_eth_dev *dev) /* Disable interrupt */ i40e_pf_disable_irq0(hw); - rte_intr_disable(&(dev->pci_dev->intr_handle)); + rte_intr_disable(ETH_DEV_TO_INTR_HANDLE(dev)); /* shutdown and destroy the HMC */ i40e_shutdown_lan_hmc(hw); @@ -2588,7 +2588,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; dev_info->max_mac_addrs = vsi->max_macaddrs; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = ETH_DEV_PCI_DEV(dev)->max_vfs; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_QINQ_STRIP | @@ -3488,11 +3488,12 @@ i40e_get_cap(struct i40e_hw *hw) static int i40e_pf_parameter_init(struct rte_eth_dev *dev) { + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint16_t qp_count = 0, vsi_count = 0; - if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) { + if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) { PMD_INIT_LOG(ERR, "HW 
configuration doesn't support SRIOV"); return -EINVAL; } @@ -3533,10 +3534,10 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) /* VF queue/VSI allocation */ pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps; - if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) { + if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) { pf->flags |= I40E_FLAG_SRIOV; pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; - pf->vf_num = dev->pci_dev->max_vfs; + pf->vf_num = pci_dev->max_vfs; PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, " "in total %u queues", pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num); @@ -5573,7 +5574,7 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, done: /* Enable interrupt */ i40e_pf_enable_irq0(hw); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(ETH_DEV_TO_INTR_HANDLE(dev)); } static int @@ -8124,10 +8125,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev, static void i40e_enable_extended_tag(struct rte_eth_dev *dev) { + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); uint32_t buf = 0; int ret; - ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CAP_REG); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", @@ -8140,7 +8142,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev) } buf = 0; - ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", @@ -8152,7 +8154,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev) return; } buf |= PCI_DEV_CTRL_EXT_TAG_MASK; - ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf), + ret = rte_eal_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x", @@ -9555,7 +9557,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); @@ -9580,7 +9582,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); I40E_WRITE_FLUSH(hw); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(ETH_DEV_TO_INTR_HANDLE(dev)); return 0; } @@ -9588,7 +9590,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t msix_intr; diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 298cef4..9d4bea7 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -671,6 +671,10 @@ i40e_get_vsi_from_adapter(struct i40e_adapter *adapter) #define I40E_VF_TO_HW(vf) \ (&(((struct i40e_vf *)vf)->adapter->hw)) +/* ETH_DEV_TO_INTR_HANDLE */ +#define ETH_DEV_TO_INTR_HANDLE(ptr) \ + (&(ETH_DEV_PCI_DEV(ptr)->intr_handle)) + static inline void i40e_init_adminq_parameter(struct i40e_hw *hw) { diff --git 
a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index aa306d6..781e658 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -718,7 +718,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev) uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \ sizeof(struct i40e_virtchnl_vector_map)]; struct i40e_virtchnl_irq_map_info *map_info; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); uint32_t vector_id; int i, err; @@ -1431,7 +1431,7 @@ i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, done: i40evf_enable_irq0(hw); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(ETH_DEV_TO_INTR_HANDLE(dev)); } static int @@ -1439,7 +1439,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\ eth_dev->data->dev_private); - struct rte_pci_device *pci_dev = eth_dev->pci_dev; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(eth_dev); PMD_INIT_FUNC_TRACE(); @@ -1458,15 +1458,15 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev) return 0; } - rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev); + rte_eth_copy_pci_info(eth_dev, pci_dev); - hw->vendor_id = eth_dev->pci_dev->id.vendor_id; - hw->device_id = eth_dev->pci_dev->id.device_id; - hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id; - hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id; - hw->bus.device = eth_dev->pci_dev->addr.devid; - hw->bus.func = eth_dev->pci_dev->addr.function; - hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr; + hw->vendor_id = pci_dev->id.vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->bus.device = pci_dev->addr.devid; + hw->bus.func = pci_dev->addr.function; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; hw->adapter_stopped = 0; if(i40evf_init_vf(eth_dev) != 0) { @@ -1853,7 +1853,7 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); if (!rte_intr_allow_others(intr_handle)) { I40E_WRITE_REG(hw, @@ -1885,7 +1885,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); if (!rte_intr_allow_others(intr_handle)) { I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, @@ -1911,7 +1911,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev) static int i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); @@ -1937,7 +1937,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) I40EVF_WRITE_FLUSH(hw); - rte_intr_enable(&dev->pci_dev->intr_handle); + 
rte_intr_enable(ETH_DEV_TO_INTR_HANDLE(dev)); return 0; } @@ -1945,7 +1945,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) static int i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t msix_intr; @@ -2025,7 +2025,7 @@ i40evf_dev_start(struct rte_eth_dev *dev) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); uint32_t intr_vector = 0; PMD_INIT_FUNC_TRACE(); @@ -2090,7 +2090,7 @@ i40evf_dev_start(struct rte_eth_dev *dev) static void i40evf_dev_stop(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); PMD_INIT_FUNC_TRACE(); @@ -2285,7 +2285,7 @@ static void i40evf_dev_close(struct rte_eth_dev *dev) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_pci_device *pci_dev = dev->pci_dev; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); i40evf_dev_stop(dev); hw->adapter_stopped = 1; diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index edc9b22..f17da46 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -427,6 +427,9 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, (r) = (h)->bitmap[idx] >> bit & 1;\ } while (0) +#define ETH_DEV_TO_INTR_HANDLE(ptr) \ + (&(ETH_DEV_PCI_DEV(ptr)->intr_handle)) + /* * The set of PCI devices this driver supports */ @@ -1127,7 +1130,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) return 0; } - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); rte_eth_copy_pci_info(eth_dev, pci_dev); @@ -1302,7 +1305,7 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) return -EPERM; hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); if (hw->adapter_stopped == 0) ixgbe_dev_close(eth_dev); @@ -1419,7 +1422,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) return 0; } - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); rte_eth_copy_pci_info(eth_dev, pci_dev); @@ -1532,7 +1535,9 @@ static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) { struct ixgbe_hw *hw; - struct rte_pci_device *pci_dev = eth_dev->pci_dev; + struct rte_pci_device *pci_dev; + + pci_dev = ETH_DEV_PCI_DEV(eth_dev); PMD_INIT_FUNC_TRACE(); @@ -1960,7 +1965,8 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) } RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q; - RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q; + RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = + ETH_DEV_PCI_DEV(dev)->max_vfs * nb_rx_q; return 0; } @@ -2191,7 +2197,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; int err, link_up 
= 0, negotiate = 0; uint32_t speed = 0; @@ -2291,7 +2298,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) /* Restore vf rate limit */ if (vfinfo != NULL) { - for (vf = 0; vf < dev->pci_dev->max_vfs; vf++) + for (vf = 0; vf < pci_dev->max_vfs; vf++) for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) if (vfinfo[vf].tx_rate[idx] != 0) ixgbe_set_vf_rate_limit(dev, vf, @@ -2408,7 +2415,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int vf; PMD_INIT_FUNC_TRACE(); @@ -2424,7 +2432,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) ixgbe_stop_adapter(hw); for (vf = 0; vfinfo != NULL && - vf < dev->pci_dev->max_vfs; vf++) + vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { @@ -3033,6 +3041,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; @@ -3049,7 +3058,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; else @@ -3164,6 +3173,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; @@ -3171,7 +3181,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; else @@ -3434,6 +3444,7 @@ static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev) { struct rte_eth_link link; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); memset(&link, 0, sizeof(link)); rte_ixgbe_dev_atomic_read_link_status(dev, &link); @@ -3448,10 +3459,10 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) (int)(dev->data->port_id)); } PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, - dev->pci_dev->addr.domain, - dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, - dev->pci_dev->addr.function); + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); } /* @@ -3515,7 +3526,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) } else { PMD_DRV_LOG(DEBUG, "enable intr immediately"); ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + 
rte_intr_enable(ETH_DEV_TO_INTR_HANDLE(dev)); } @@ -3564,7 +3575,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param) PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(ETH_DEV_TO_INTR_HANDLE(dev)); } /** @@ -4196,7 +4207,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t intr_vector = 0; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); int err, mask = 0; @@ -4259,7 +4270,7 @@ static void ixgbevf_dev_stop(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); PMD_INIT_FUNC_TRACE(); @@ -5070,7 +5081,7 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) RTE_SET_USED(queue_id); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(ETH_DEV_TO_INTR_HANDLE(dev)); return 0; } @@ -5112,7 +5123,7 @@ ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) mask &= (1 << (queue_id - 32)); IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); } - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(ETH_DEV_TO_INTR_HANDLE(dev)); return 0; } @@ -5216,7 +5227,7 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, static void ixgbevf_configure_msix(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t q_idx; @@ -5249,7 +5260,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) static void ixgbe_configure_msix(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = ETH_DEV_TO_INTR_HANDLE(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t queue_id, base = IXGBE_MISC_VEC_ID; @@ -5381,7 +5392,8 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, return -EINVAL; if (vfinfo != NULL) { - for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) { + for (vf_idx = 0; vf_idx < ETH_DEV_PCI_DEV(dev)->max_vfs; + vf_idx++) { if (vf_idx == vf) continue; for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); @@ -7197,12 +7209,13 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, int ret = 0; uint32_t vmtir, vmvir; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(dev); - if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) { + if (l2_tunnel->vf_id >= pci_dev->max_vfs) { PMD_DRV_LOG(ERR, "VF id %u should be less than %u", l2_tunnel->vf_id, - dev->pci_dev->max_vfs); + pci_dev->max_vfs); return -EINVAL; } diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c index 26395e4..139d816 100644 --- a/drivers/net/ixgbe/ixgbe_pf.c +++ b/drivers/net/ixgbe/ixgbe_pf.c @@ -61,7 +61,7 @@ static inline uint16_t dev_num_vf(struct rte_eth_dev *eth_dev) { - return eth_dev->pci_dev->max_vfs; + return ETH_DEV_PCI_DEV(eth_dev)->max_vfs; } static inline diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index d106dd0..959ff0f 100644 --- 
a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -178,11 +178,12 @@ static void qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param) { struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(eth_dev); struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; qede_interrupt_action(ECORE_LEADING_HWFN(edev)); - if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) + if (rte_intr_enable(&pci_dev->intr_handle)) DP_ERR(edev, "rte_intr_enable failed\n"); } @@ -809,6 +810,7 @@ static void qede_poll_sp_sb_cb(void *param) static void qede_dev_close(struct rte_eth_dev *eth_dev) { + struct rte_pci_device *pci_dev = ETH_DEV_PCI_DEV(eth_dev); struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); int rc; @@ -835,9 +837,9 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) qdev->ops->common->remove(edev); - rte_intr_disable(&eth_dev->pci_dev->intr_handle); + rte_intr_disable(&pci_dev->intr_handle); - rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle, + rte_intr_callback_unregister(&pci_dev->intr_handle, qede_interrupt_handler, (void *)eth_dev); if (edev->num_hwfns > 1) @@ -1403,7 +1405,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) /* Extract key data structures */ adapter = eth_dev->data->dev_private; edev = &adapter->edev; - pci_addr = eth_dev->pci_dev->addr; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); + pci_addr = pci_dev->addr; PMD_INIT_FUNC_TRACE(edev); @@ -1420,8 +1423,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) return 0; } - pci_dev = eth_dev->pci_dev; - rte_eth_copy_pci_info(eth_dev, pci_dev); qed_ops = qed_get_eth_ops(); @@ -1442,10 +1443,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) qede_update_pf_params(edev); - rte_intr_callback_register(&eth_dev->pci_dev->intr_handle, + rte_intr_callback_register(&pci_dev->intr_handle, qede_interrupt_handler, (void *)eth_dev); - if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) { + if (rte_intr_enable(&pci_dev->intr_handle)) { DP_ERR(edev, "rte_intr_enable() failed\n"); return -ENODEV; } diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c index 8bb13e5..bcb3751 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -138,7 +138,7 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size, const struct rte_memzone *mz; snprintf(z_name, sizeof(z_name), "%s_%d_%s", - dev->driver->pci_drv.driver.name, dev->data->port_id, post_string); + dev->data->drv_name, dev->data->port_id, post_string); mz = rte_memzone_lookup(z_name); if (!reuse) { @@ -237,7 +237,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev) eth_dev->dev_ops = &vmxnet3_eth_dev_ops; eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts; eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts; - pci_dev = eth_dev->pci_dev; + pci_dev = ETH_DEV_PCI_DEV(eth_dev); /* * for secondary processes, we don't initialize any further as primary -- 2.7.4
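
For reference, the per-driver shorthands introduced above all reduce to the
same pattern built on ETH_DEV_PCI_DEV(); they are collected here as they
appear in the i40e, ixgbe and fm10k hunks, with the surrounding context
omitted:

    /* i40e_ethdev.h and ixgbe_ethdev.c: shorthand for the interrupt handle */
    #define ETH_DEV_TO_INTR_HANDLE(ptr) \
            (&(ETH_DEV_PCI_DEV(ptr)->intr_handle))

    /* fm10k_ethdev.c: same shorthand plus the queue-to-vector mapping */
    #define D2IH(dev) (&ETH_DEV_PCI_DEV(dev)->intr_handle)
    #define Q2V(dev, queue_id) (D2IH(dev)->intr_vec[queue_id])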