From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v3 21/56] net/txgbe: add Rx and Tx queues setup and release
Date: Wed, 14 Oct 2020 13:54:42 +0800
Message-ID: <20201014055517.1214386-22-jiawenwu@trustnetic.com> (raw)
In-Reply-To: <20201014055517.1214386-1-jiawenwu@trustnetic.com>

Add receive and transmit queue setup and release operations.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/txgbe_ethdev.c |   4 +
 drivers/net/txgbe/txgbe_ethdev.h |  13 +
 drivers/net/txgbe/txgbe_rxtx.c   | 510 +++++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_rxtx.h   | 130 ++++++++
 4 files changed, 657 insertions(+)
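
Note (not part of the patch itself): the dev_ops registered here are reached
through the generic ethdev queue setup API. Below is a minimal sketch of how
an application would exercise them; the port id, queue counts, descriptor
count and mempool are illustrative assumptions, not values taken from this
series.

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_queues(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = { 0 };
	int socket_id = rte_eth_dev_socket_id(port_id);
	int ret;

	/* 1 Rx + 1 Tx queue; the setup calls below dispatch to
	 * txgbe_dev_rx_queue_setup()/txgbe_dev_tx_queue_setup().
	 */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* 512 descriptors is an assumed value satisfying the driver's
	 * alignment and min/max checks; NULL selects default rxconf/txconf.
	 */
	ret = rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
				     NULL, mb_pool);
	if (ret != 0)
		return ret;

	return rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, NULL);
}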

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 1395f6ffe..6186cace1 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1322,6 +1322,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.dev_infos_get              = txgbe_dev_info_get,
 	.dev_set_link_up            = txgbe_dev_set_link_up,
 	.dev_set_link_down          = txgbe_dev_set_link_down,
+	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
+	.rx_queue_release           = txgbe_dev_rx_queue_release,
+	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
+	.tx_queue_release           = txgbe_dev_tx_queue_release,
 	.mac_addr_add               = txgbe_add_rar,
 	.mac_addr_remove            = txgbe_remove_rar,
 	.mac_addr_set               = txgbe_set_default_mac_addr,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 096b17673..6636b6e9a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -80,6 +80,19 @@ struct txgbe_adapter {
 /*
  * RX/TX function prototypes
  */
+void txgbe_dev_rx_queue_release(void *rxq);
+
+void txgbe_dev_tx_queue_release(void *txq);
+
+int  txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+		uint16_t nb_rx_desc, unsigned int socket_id,
+		const struct rte_eth_rxconf *rx_conf,
+		struct rte_mempool *mb_pool);
+
+int  txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+		uint16_t nb_tx_desc, unsigned int socket_id,
+		const struct rte_eth_txconf *tx_conf);
+
 int txgbe_dev_rx_init(struct rte_eth_dev *dev);
 
 void txgbe_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index eadc06bcf..707d5b2e4 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -7,10 +7,14 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <errno.h>
 
 #include <rte_common.h>
 #include <rte_ethdev.h>
 #include <rte_ethdev_driver.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
 #include <rte_mbuf.h>
 
 #include "txgbe_logs.h"
@@ -31,6 +35,10 @@ txgbe_is_vf(struct rte_eth_dev *dev)
 	}
 }
 
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
 uint64_t
 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
 {
@@ -73,6 +81,57 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	return offloads;
 }
 
+static void __rte_cold
+txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
+{
+	unsigned int i;
+
+	if (txq->sw_ring != NULL) {
+		for (i = 0; i < txq->nb_tx_desc; i++) {
+			if (txq->sw_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+				txq->sw_ring[i].mbuf = NULL;
+			}
+		}
+	}
+}
+
+static void __rte_cold
+txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
+{
+	if (txq != NULL &&
+	    txq->sw_ring != NULL)
+		rte_free(txq->sw_ring);
+}
+
+static void __rte_cold
+txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
+{
+	if (txq != NULL && txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->free_swring(txq);
+		rte_free(txq);
+	}
+}
+
+void __rte_cold
+txgbe_dev_tx_queue_release(void *txq)
+{
+	txgbe_tx_queue_release(txq);
+}
+
+static const struct txgbe_txq_ops def_txq_ops = {
+	.release_mbufs = txgbe_tx_queue_release_mbufs,
+	.free_swring = txgbe_tx_free_swring,
+};
+
+void __rte_cold
+txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(txq);
+}
+
 uint64_t
 txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
 {
@@ -112,6 +171,457 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	return tx_offload_capa;
 }
 
+int __rte_cold
+txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+			 uint16_t queue_idx,
+			 uint16_t nb_desc,
+			 unsigned int socket_id,
+			 const struct rte_eth_txconf *tx_conf)
+{
+	const struct rte_memzone *tz;
+	struct txgbe_tx_queue *txq;
+	struct txgbe_hw     *hw;
+	uint16_t tx_free_thresh;
+	uint64_t offloads;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = TXGBE_DEV_HW(dev);
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+	/*
+	 * Validate number of transmit descriptors.
+	 * It must not exceed the hardware maximum and must be a multiple
+	 * of TXGBE_TXD_ALIGN.
+	 */
+	if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
+	    nb_desc > TXGBE_RING_DESC_MAX ||
+	    nb_desc < TXGBE_RING_DESC_MIN) {
+		return -EINVAL;
+	}
+
+	/*
+	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+	 * descriptors are used or if the number of descriptors required
+	 * to transmit a packet is greater than the number of free TX
+	 * descriptors.
+	 * One descriptor in the TX ring is used as a sentinel to avoid a
+	 * H/W race condition, hence the maximum threshold constraints.
+	 * When set to zero use default values.
+	 */
+	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+	if (tx_free_thresh >= (nb_desc - 3)) {
+		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
+			     "TX descriptors minus 3. (tx_free_thresh=%u "
+			     "port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
+		return -(EINVAL);
+	}
+
+	if ((nb_desc % tx_free_thresh) != 0) {
+		PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_free_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
+		return -(EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	/* First allocate the tx queue data structure */
+	txq = rte_zmalloc_socket("ethdev TX queue",
+				 sizeof(struct txgbe_tx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL)
+		return -ENOMEM;
+
+	/*
+	 * Allocate TX ring hardware descriptors. A memzone large enough to
+	 * handle the maximum ring size is allocated in order to allow for
+	 * resizing in later calls to the queue setup function.
+	 */
+	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+			sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
+			TXGBE_ALIGN, socket_id);
+	if (tz == NULL) {
+		txgbe_tx_queue_release(txq);
+		return -ENOMEM;
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->tx_free_thresh = tx_free_thresh;
+	txq->pthresh = tx_conf->tx_thresh.pthresh;
+	txq->hthresh = tx_conf->tx_thresh.hthresh;
+	txq->wthresh = tx_conf->tx_thresh.wthresh;
+	txq->queue_id = queue_idx;
+	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+	txq->port_id = dev->data->port_id;
+	txq->offloads = offloads;
+	txq->ops = &def_txq_ops;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+	/* Use the VF register space for the TDT/TDC register addresses
+	 * when running as a virtual function.
+	 */
+	if (hw->mac.type == txgbe_mac_raptor_vf) {
+		txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));
+		txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));
+	} else {
+		txq->tdt_reg_addr = TXGBE_REG_ADDR(hw,
+						TXGBE_TXWP(txq->reg_idx));
+		txq->tdc_reg_addr = TXGBE_REG_ADDR(hw,
+						TXGBE_TXCFG(txq->reg_idx));
+	}
+
+	txq->tx_ring_phys_addr = TMZ_PADDR(tz);
+	txq->tx_ring = (struct txgbe_tx_desc *)TMZ_VADDR(tz);
+
+	/* Allocate software ring */
+	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
+				sizeof(struct txgbe_tx_entry) * nb_desc,
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->sw_ring == NULL) {
+		txgbe_tx_queue_release(txq);
+		return -ENOMEM;
+	}
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
+		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+	/* set up scalar TX function as appropriate */
+	txgbe_set_tx_function(dev, txq);
+
+	txq->ops->reset(txq);
+
+	dev->data->tx_queues[queue_idx] = txq;
+
+	return 0;
+}
+
+/**
+ * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
+ *
+ * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
+ * in the sw_rsc_ring is not set to NULL but rather points to the next
+ * mbuf of this RSC aggregation (that has not been completed yet and still
+ * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
+ * free only the first "nb_segs" segments of the cluster explicitly, by
+ * calling rte_pktmbuf_free_seg() on each of them.
+ *
+ * @m scattered cluster head
+ */
+static void __rte_cold
+txgbe_free_sc_cluster(struct rte_mbuf *m)
+{
+	uint16_t i, nb_segs = m->nb_segs;
+	struct rte_mbuf *next_seg;
+
+	for (i = 0; i < nb_segs; i++) {
+		next_seg = m->next;
+		rte_pktmbuf_free_seg(m);
+		m = next_seg;
+	}
+}
+
+static void __rte_cold
+txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
+{
+	unsigned int i;
+
+	if (rxq->sw_ring != NULL) {
+		for (i = 0; i < rxq->nb_rx_desc; i++) {
+			if (rxq->sw_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+				rxq->sw_ring[i].mbuf = NULL;
+			}
+		}
+		if (rxq->rx_nb_avail) {
+			for (i = 0; i < rxq->rx_nb_avail; ++i) {
+				struct rte_mbuf *mb;
+
+				mb = rxq->rx_stage[rxq->rx_next_avail + i];
+				rte_pktmbuf_free_seg(mb);
+			}
+			rxq->rx_nb_avail = 0;
+		}
+	}
+
+	if (rxq->sw_sc_ring)
+		for (i = 0; i < rxq->nb_rx_desc; i++)
+			if (rxq->sw_sc_ring[i].fbuf) {
+				txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
+				rxq->sw_sc_ring[i].fbuf = NULL;
+			}
+}
+
+static void __rte_cold
+txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
+{
+	if (rxq != NULL) {
+		txgbe_rx_queue_release_mbufs(rxq);
+		rte_free(rxq->sw_ring);
+		rte_free(rxq->sw_sc_ring);
+		rte_free(rxq);
+	}
+}
+
+void __rte_cold
+txgbe_dev_rx_queue_release(void *rxq)
+{
+	txgbe_rx_queue_release(rxq);
+}
+
+/*
+ * Check if Rx Burst Bulk Alloc function can be used.
+ * Return
+ *        0: the preconditions are satisfied and the bulk allocation function
+ *           can be used.
+ *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
+ *           function must be used.
+ */
+static inline int __rte_cold
+check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)
+{
+	int ret = 0;
+
+	/*
+	 * Make sure the following pre-conditions are satisfied:
+	 *   rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST
+	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
+	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
+	 * Scattered packets are not supported.  This should be checked
+	 * outside of this function.
+	 */
+	if (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "RTE_PMD_TXGBE_RX_MAX_BURST=%d",
+			     rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);
+		ret = -EINVAL;
+	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "rxq->nb_rx_desc=%d",
+			     rxq->rx_free_thresh, rxq->nb_rx_desc);
+		ret = -EINVAL;
+	} else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "rxq->rx_free_thresh=%d",
+			     rxq->nb_rx_desc, rxq->rx_free_thresh);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Reset dynamic txgbe_rx_queue fields back to defaults */
+static void __rte_cold
+txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
+{
+	static const struct txgbe_rx_desc zeroed_desc = {
+						{{0}, {0} }, {{0}, {0} } };
+	unsigned int i;
+	uint16_t len = rxq->nb_rx_desc;
+
+	/*
+	 * By default, the Rx queue setup function allocates enough memory for
+	 * TXGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires
+	 * extra memory at the end of the descriptor ring to be zero'd out.
+	 */
+	if (adapter->rx_bulk_alloc_allowed)
+		/* zero out extra memory */
+		len += RTE_PMD_TXGBE_RX_MAX_BURST;
+
+	/*
+	 * Zero out HW ring memory. Zero out extra memory at the end of
+	 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
+	 * reads extra memory as zeros.
+	 */
+	for (i = 0; i < len; i++)
+		rxq->rx_ring[i] = zeroed_desc;
+
+	/*
+	 * initialize extra software ring entries. Space for these extra
+	 * entries is always allocated
+	 */
+	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+	for (i = rxq->nb_rx_desc; i < len; ++i)
+		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
+
+	rxq->rx_nb_avail = 0;
+	rxq->rx_next_avail = 0;
+	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+	rxq->rx_tail = 0;
+	rxq->nb_rx_hold = 0;
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+}
+
+int __rte_cold
+txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
+			 uint16_t queue_idx,
+			 uint16_t nb_desc,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp)
+{
+	const struct rte_memzone *rz;
+	struct txgbe_rx_queue *rxq;
+	struct txgbe_hw     *hw;
+	uint16_t len;
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	uint64_t offloads;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = TXGBE_DEV_HW(dev);
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	/*
+	 * Validate number of receive descriptors.
+	 * It must not exceed the hardware maximum and must be a multiple
+	 * of TXGBE_RXD_ALIGN.
+	 */
+	if (nb_desc % TXGBE_RXD_ALIGN != 0 ||
+			nb_desc > TXGBE_RING_DESC_MAX ||
+			nb_desc < TXGBE_RING_DESC_MIN) {
+		return -EINVAL;
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		txgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	/* First allocate the rx queue data structure */
+	rxq = rte_zmalloc_socket("ethdev RX queue",
+				 sizeof(struct txgbe_rx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (rxq == NULL)
+		return -ENOMEM;
+	rxq->mb_pool = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+	rxq->queue_id = queue_idx;
+	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+	rxq->port_id = dev->data->port_id;
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
+	rxq->drop_en = rx_conf->rx_drop_en;
+	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = offloads;
+
+	/*
+	 * The packet type field in the RX descriptor differs between NICs,
+	 * so set a per-NIC mask to extract it.
+	 */
+	rxq->pkt_type_mask = TXGBE_PTID_MASK;
+
+	/*
+	 * Allocate RX ring hardware descriptors. A memzone large enough to
+	 * handle the maximum ring size is allocated in order to allow for
+	 * resizing in later calls to the queue setup function.
+	 */
+	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+				      RX_RING_SZ, TXGBE_ALIGN, socket_id);
+	if (rz == NULL) {
+		txgbe_rx_queue_release(rxq);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Zero init all the descriptors in the ring.
+	 */
+	memset(rz->addr, 0, RX_RING_SZ);
+
+	/*
+	 * Use the VF register space for RDT/RDH when running as a VF.
+	 */
+	if (hw->mac.type == txgbe_mac_raptor_vf) {
+		rxq->rdt_reg_addr =
+			TXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));
+		rxq->rdh_reg_addr =
+			TXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));
+	} else {
+		rxq->rdt_reg_addr =
+			TXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));
+		rxq->rdh_reg_addr =
+			TXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));
+	}
+
+	rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
+	rxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);
+
+	/*
+	 * Certain constraints must be met in order to use the bulk buffer
+	 * allocation Rx burst function. If any Rx queue doesn't meet them,
+	 * the feature is disabled for the whole port.
+	 */
+	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+				    "preconditions - canceling the feature for "
+				    "the whole port[%d]",
+			     rxq->queue_id, rxq->port_id);
+		adapter->rx_bulk_alloc_allowed = false;
+	}
+
+	/*
+	 * Allocate software ring. Allow for space at the end of the
+	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
+	 * function does not access an invalid memory region.
+	 */
+	len = nb_desc;
+	if (adapter->rx_bulk_alloc_allowed)
+		len += RTE_PMD_TXGBE_RX_MAX_BURST;
+
+	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
+					  sizeof(struct txgbe_rx_entry) * len,
+					  RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->sw_ring) {
+		txgbe_rx_queue_release(rxq);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Always allocate even if it's not going to be needed in order to
+	 * simplify the code.
+	 *
+	 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
+	 * be requested in txgbe_dev_rx_init(), which is called later from
+	 * dev_start() flow.
+	 */
+	rxq->sw_sc_ring =
+		rte_zmalloc_socket("rxq->sw_sc_ring",
+				  sizeof(struct txgbe_scattered_rx_entry) * len,
+				  RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->sw_sc_ring) {
+		txgbe_rx_queue_release(rxq);
+		return -ENOMEM;
+	}
+
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
+			    "dma_addr=0x%" PRIx64,
+		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
+		     rxq->rx_ring_phys_addr);
+
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	txgbe_reset_rx_queue(adapter, rxq);
+
+	return 0;
+}
+
 void __rte_cold
 txgbe_set_rx_function(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 7d3d9c275..be165dd19 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -5,38 +5,168 @@
 #ifndef _TXGBE_RXTX_H_
 #define _TXGBE_RXTX_H_
 
+/*****************************************************************************
+ * Receive Descriptor
+ *****************************************************************************/
+struct txgbe_rx_desc {
+	struct {
+		union {
+			__le32 dw0;
+			struct {
+				__le16 pkt;
+				__le16 hdr;
+			} lo;
+		};
+		union {
+			__le32 dw1;
+			struct {
+				__le16 ipid;
+				__le16 csum;
+			} hi;
+		};
+	} qw0; /* also as r.pkt_addr */
+	struct {
+		union {
+			__le32 dw2;
+			struct {
+				__le32 status;
+			} lo;
+		};
+		union {
+			__le32 dw3;
+			struct {
+				__le16 len;
+				__le16 tag;
+			} hi;
+		};
+	} qw1; /* also as r.hdr_addr */
+};
+
+/**
+ * Transmit Data Descriptor (TXGBE_TXD_TYP=DATA)
+ */
+struct txgbe_tx_desc {
+	__le64 qw0; /* r.buffer_addr ,  w.reserved    */
+	__le32 dw2; /* r.cmd_type_len,  w.nxtseq_seed */
+	__le32 dw3; /* r.olinfo_status, w.status      */
+};
+
 #define RTE_PMD_TXGBE_TX_MAX_BURST 32
 #define RTE_PMD_TXGBE_RX_MAX_BURST 32
 
+#define RX_RING_SZ ((TXGBE_RING_DESC_MAX + RTE_PMD_TXGBE_RX_MAX_BURST) * \
+		    sizeof(struct txgbe_rx_desc))
+
+#define TXGBE_PTID_MASK                 0xFF
+
 #define TXGBE_TX_MAX_SEG                    40
 
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct txgbe_rx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+struct txgbe_scattered_rx_entry {
+	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct txgbe_tx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+	uint16_t next_id; /**< Index of next descriptor in ring. */
+	uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct txgbe_tx_entry_v {
+	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+};
+
 /**
  * Structure associated with each RX queue.
  */
 struct txgbe_rx_queue {
 	struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
+	volatile struct txgbe_rx_desc *rx_ring; /**< RX ring virtual address. */
 	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
+	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
+	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
+	struct txgbe_rx_entry *sw_ring; /**< address of RX software ring. */
+	/**< address of scattered Rx software ring. */
+	struct txgbe_scattered_rx_entry *sw_sc_ring;
+	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+	uint16_t            rx_tail;  /**< current value of RDT register. */
+	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
+	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
+	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
+	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
+	uint16_t            queue_id; /**< RX queue index. */
 	uint16_t            reg_idx;  /**< RX queue register index. */
+	/**< Packet type mask for different NICs. */
+	uint16_t            pkt_type_mask;
+	uint16_t            port_id;  /**< Device port identifier. */
 	uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
+	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
+	struct rte_mbuf fake_mbuf;
+	/** hold packets to return to application */
+	struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2];
 };
 
 /**
  * Structure associated with each TX queue.
  */
 struct txgbe_tx_queue {
+	/** TX ring virtual address. */
+	volatile struct txgbe_tx_desc *tx_ring;
 	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
+	union {
+		/**< address of SW ring for scalar PMD. */
+		struct txgbe_tx_entry *sw_ring;
+		/**< address of SW ring for vector PMD */
+		struct txgbe_tx_entry_v *sw_ring_v;
+	};
+	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
+	volatile uint32_t   *tdc_reg_addr; /**< Address of TDC register. */
 	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
 	/**< Start freeing TX buffers if there are less free descriptors than
 	 *   this value.
 	 */
 	uint16_t            tx_free_thresh;
+	uint16_t            queue_id;      /**< TX queue index. */
 	uint16_t            reg_idx;       /**< TX queue register index. */
+	uint16_t            port_id;       /**< Device port identifier. */
+	uint8_t             pthresh;       /**< Prefetch threshold register. */
+	uint8_t             hthresh;       /**< Host threshold register. */
+	uint8_t             wthresh;       /**< Write-back threshold reg. */
 	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	const struct txgbe_txq_ops *ops;       /**< txq ops */
+	uint8_t             tx_deferred_start; /**< not in global dev start. */
+};
+
+struct txgbe_txq_ops {
+	void (*release_mbufs)(struct txgbe_tx_queue *txq);
+	void (*free_swring)(struct txgbe_tx_queue *txq);
+	void (*reset)(struct txgbe_tx_queue *txq);
 };
 
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by the primary process, and
+ * then in dev_init by a secondary process attaching to an existing ethdev.
+ */
+void txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq);
+
 void txgbe_set_rx_function(struct rte_eth_dev *dev);
 
 uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
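
Usage note (illustrative, not part of the patch): txgbe_dev_tx_queue_setup()
accepts a tx_free_thresh only if it is below nb_desc - 3 and divides nb_desc
evenly, and it falls back to DEFAULT_TX_FREE_THRESH (32) when 0 is passed.
A txconf that satisfies both checks for an assumed 512-descriptor ring:

	struct rte_eth_txconf tx_conf = {
		/* 512 % 32 == 0 and 32 < 512 - 3, so both checks pass */
		.tx_free_thresh = 32,
	};
	ret = rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, &tx_conf);

Likewise, the Rx bulk-alloc path stays enabled only while every queue keeps
rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST (32), below nb_rx_desc, and a
divisor of nb_rx_desc; e.g. rx_free_thresh = 32 with 512 Rx descriptors.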
-- 
2.18.4



