From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id C197A8D3A for ; Tue, 7 Jun 2016 07:47:13 +0200 (CEST) Received: from fmsmga003.fm.intel.com ([10.253.24.29]) by fmsmga104.fm.intel.com with ESMTP; 06 Jun 2016 22:47:12 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.26,431,1459839600"; d="scan'208";a="715045328" Received: from shvmail01.sh.intel.com ([10.239.29.42]) by FMSMGA003.fm.intel.com with ESMTP; 06 Jun 2016 22:47:13 -0700 Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com [10.239.29.89]) by shvmail01.sh.intel.com with ESMTP id u575l9sv007872; Tue, 7 Jun 2016 13:47:09 +0800 Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1]) by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP id u575l6q5005065; Tue, 7 Jun 2016 13:47:08 +0800 Received: (from zhetao@localhost) by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id u575l6xs005061; Tue, 7 Jun 2016 13:47:06 +0800 From: Zhe Tao To: dev@dpdk.org Cc: wenzhuo.lu@intel.com, zhe.tao@intel.com, konstantin.ananyev@intel.com, bruce.richardson@intel.com, jing.d.chen@intel.com, cunming.liang@intel.com, jingjing.wu@intel.com, helin.zhang@intel.com Date: Tue, 7 Jun 2016 13:45:15 +0800 Message-Id: <1465278318-4949-6-git-send-email-zhe.tao@intel.com> X-Mailer: git-send-email 1.7.4.1 In-Reply-To: <1465278318-4949-1-git-send-email-zhe.tao@intel.com> References: <1465278318-4949-1-git-send-email-zhe.tao@intel.com> Subject: [dpdk-dev] [PATCH v2 5/8] igb: RX/TX with lock on VF X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 07 Jun 2016 05:47:14 -0000 From: Wenzhuo Lu Add RX/TX paths with lock for VF. It's used when the function of link reset on VF is needed. When the lock for RX/TX is added, the RX/TX can be stopped. 
Then we have a chance to reset the VF link. Please be aware there's performance drop if the lock path is chosen. Signed-off-by: Wenzhuo Lu Signed-off-by: zhe.tao --- drivers/net/e1000/e1000_ethdev.h | 10 ++++++++++ drivers/net/e1000/igb_ethdev.c | 14 +++++++++++--- drivers/net/e1000/igb_rxtx.c | 26 +++++++++++++++++++++----- 3 files changed, 42 insertions(+), 8 deletions(-) diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h index e8bf8da..6a42994 100644 --- a/drivers/net/e1000/e1000_ethdev.h +++ b/drivers/net/e1000/e1000_ethdev.h @@ -319,6 +319,16 @@ uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t eth_igb_recv_scattered_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +uint16_t eth_igb_xmit_pkts_lock(void *txq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t eth_igb_recv_pkts_lock(void *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t eth_igb_recv_scattered_pkts_lock(void *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + int eth_igb_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index b0e5e6a..8aad741 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -909,15 +909,17 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); eth_dev->dev_ops = &igbvf_eth_dev_ops; - eth_dev->rx_pkt_burst = &eth_igb_recv_pkts; - eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts; + eth_dev->rx_pkt_burst = RX_LOCK_FUNCTION(eth_dev, eth_igb_recv_pkts); + eth_dev->tx_pkt_burst = TX_LOCK_FUNCTION(eth_dev, eth_igb_xmit_pkts); /* for secondary processes, we don't initialise any further as primary * has already done this work. 
Only check we don't need a different * RX function */ if (rte_eal_process_type() != RTE_PROC_PRIMARY){ if (eth_dev->data->scattered_rx) - eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts; + eth_dev->rx_pkt_burst = + RX_LOCK_FUNCTION(eth_dev, + eth_igb_recv_scattered_pkts); return 0; } @@ -1999,7 +2001,13 @@ eth_igb_supported_ptypes_get(struct rte_eth_dev *dev) }; if (dev->rx_pkt_burst == eth_igb_recv_pkts || +#ifndef RTE_NEXT_ABI dev->rx_pkt_burst == eth_igb_recv_scattered_pkts) +#else + dev->rx_pkt_burst == eth_igb_recv_scattered_pkts || + dev->rx_pkt_burst == eth_igb_recv_pkts_lock || + dev->rx_pkt_burst == eth_igb_recv_scattered_pkts_lock) +#endif return ptypes; return NULL; } diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c index 18aeead..7e97330 100644 --- a/drivers/net/e1000/igb_rxtx.c +++ b/drivers/net/e1000/igb_rxtx.c @@ -67,6 +67,7 @@ #include #include #include +#include #include "e1000_logs.h" #include "base/e1000_api.h" @@ -107,6 +108,7 @@ struct igb_rx_queue { struct igb_rx_entry *sw_ring; /**< address of RX software ring. */ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + rte_spinlock_t rx_lock; /**< Lock for packet reception. */ uint16_t nb_rx_desc; /**< number of RX descriptors. */ uint16_t rx_tail; /**< current value of RDT register. */ uint16_t nb_rx_hold; /**< number of held free RX desc. */ @@ -174,6 +176,7 @@ struct igb_tx_queue { volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */ + rte_spinlock_t tx_lock; /**< Lock for packet transmission. */ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ uint32_t txd_type; /**< Device-specific TXD type */ uint16_t nb_tx_desc; /**< number of TX descriptors. 
*/ @@ -615,6 +618,8 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } +GENERATE_TX_LOCK(eth_igb_xmit_pkts, igb) + /********************************************************************* * * RX functions @@ -931,6 +936,8 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, return nb_rx; } +GENERATE_RX_LOCK(eth_igb_recv_pkts, igb) + uint16_t eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -1186,6 +1193,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, return nb_rx; } +GENERATE_RX_LOCK(eth_igb_recv_scattered_pkts, igb) + /* * Maximum number of Ring Descriptors. * @@ -1344,6 +1353,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); txq->port_id = dev->data->port_id; + rte_spinlock_init(&txq->tx_lock); txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx)); txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); @@ -1361,7 +1371,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); igb_reset_tx_queue(txq, dev); - dev->tx_pkt_burst = eth_igb_xmit_pkts; + dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, eth_igb_xmit_pkts); dev->data->tx_queues[queue_idx] = txq; return 0; @@ -1467,6 +1477,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, rxq->port_id = dev->data->port_id; rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN); + rte_spinlock_init(&rxq->rx_lock); /* * Allocate RX ring hardware descriptors. A memzone large enough to @@ -2323,7 +2334,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) /* Configure and enable each RX queue. 
*/ rctl_bsize = 0; - dev->rx_pkt_burst = eth_igb_recv_pkts; + dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, eth_igb_recv_pkts); for (i = 0; i < dev->data->nb_rx_queues; i++) { uint64_t bus_addr; uint32_t rxdctl; @@ -2370,7 +2381,9 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); - dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->rx_pkt_burst = + RX_LOCK_FUNCTION(dev, + eth_igb_recv_scattered_pkts); dev->data->scattered_rx = 1; } } else { @@ -2381,7 +2394,9 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) rctl_bsize = buf_size; if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); - dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->rx_pkt_burst = + RX_LOCK_FUNCTION(dev, + eth_igb_recv_scattered_pkts); dev->data->scattered_rx = 1; } @@ -2414,7 +2429,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) if (dev->data->dev_conf.rxmode.enable_scatter) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); - dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->rx_pkt_burst = + RX_LOCK_FUNCTION(dev, eth_igb_recv_scattered_pkts); dev->data->scattered_rx = 1; } -- 2.1.4