DPDK patches and discussions
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v5 20/24] net/ngbe: support bulk and scatter Rx
Date: Wed,  2 Jun 2021 17:41:04 +0800
Message-ID: <20210602094108.1575640-21-jiawenwu@trustnetic.com>
In-Reply-To: <20210602094108.1575640-1-jiawenwu@trustnetic.com>

Add a bulk allocation receive function, and support scattered Rx
depending on the Rx offload configuration.
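
For context, a minimal application-side sketch (not part of this
patch; the port id is hypothetical) of requesting scattered Rx so
that the new handlers get selected:

	uint16_t port_id = 0;              /* hypothetical port */
	struct rte_eth_conf conf = { 0 };

	/* With this offload set (or an MTU larger than one mbuf),
	 * ngbe_set_rx_function() picks a scatter-capable Rx handler.
	 */
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
	rte_eth_dev_configure(port_id, 1, 1, &conf);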

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/ngbe.rst       |   1 +
 drivers/net/ngbe/ngbe_ethdev.c |  15 +-
 drivers/net/ngbe/ngbe_ethdev.h |   8 +
 drivers/net/ngbe/ngbe_rxtx.c   | 583 +++++++++++++++++++++++++++++++++
 drivers/net/ngbe/ngbe_rxtx.h   |   2 +
 5 files changed, 607 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 04fa3e90a8..e999e0b580 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -14,6 +14,7 @@ Features
 - Checksum offload
 - Jumbo frames
 - Link state information
+- Scatter and gather for RX
 
 Prerequisites
 -------------
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 4dab920caa..260bca0e4f 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -112,8 +112,16 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
 	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;
 
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+	/*
+	 * For secondary processes, we don't initialise any further as the
+	 * primary has already done this work. We only need to check whether
+	 * a different Rx or Tx function should be used.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		ngbe_set_rx_function(eth_dev);
+
 		return 0;
+	}
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
@@ -359,7 +367,10 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 const uint32_t *
 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
-	if (dev->rx_pkt_burst == ngbe_recv_pkts)
+	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
+	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
+	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
+	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
 		return ngbe_get_supported_ptypes();
 
 	return NULL;
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index c0f8483eca..1e21db5e25 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -78,6 +78,14 @@ void ngbe_dev_tx_init(struct rte_eth_dev *dev);
 uint16_t ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
+uint16_t ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+				    uint16_t nb_pkts);
+
+uint16_t ngbe_recv_pkts_sc_single_alloc(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
 uint16_t ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
 
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 9462da5b7a..f633718237 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -321,6 +321,257 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 	return pkt_flags;
 }
 
+/*
+ * LOOK_AHEAD defines how many desc statuses to check beyond the
+ * current descriptor.
+ * It must be a compile-time constant (#define) for optimal performance.
+ * Do not change the value of LOOK_AHEAD, as the ngbe_rx_scan_hw_ring
+ * function only works with LOOK_AHEAD=8.
+ */
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD NGBE: LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
+{
+	volatile struct ngbe_rx_desc *rxdp;
+	struct ngbe_rx_entry *rxep;
+	struct rte_mbuf *mb;
+	uint16_t pkt_len;
+	uint64_t pkt_flags;
+	int nb_dd;
+	uint32_t s[LOOK_AHEAD];
+	uint32_t pkt_info[LOOK_AHEAD];
+	int i, j, nb_rx = 0;
+	uint32_t status;
+
+	/* get references to current descriptor and S/W ring entry */
+	rxdp = &rxq->rx_ring[rxq->rx_tail];
+	rxep = &rxq->sw_ring[rxq->rx_tail];
+
+	status = rxdp->qw1.lo.status;
+	/* check to make sure there is at least 1 packet to receive */
+	if (!(status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
+		return 0;
+
+	/*
+	 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
+	 * reference packets that are ready to be received.
+	 */
+	for (i = 0; i < RTE_PMD_NGBE_RX_MAX_BURST;
+	     i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
+		/* Read desc statuses; the acquire fence below keeps these
+		 * reads ordered before the descriptor field reads that follow.
+		 */
+		for (j = 0; j < LOOK_AHEAD; j++)
+			s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
+
+		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+		/* Compute how many status bits were set */
+		for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
+				(s[nb_dd] & NGBE_RXD_STAT_DD); nb_dd++)
+			;
+
+		for (j = 0; j < nb_dd; j++)
+			pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
+
+		nb_rx += nb_dd;
+
+		/* Translate descriptor info to mbuf format */
+		for (j = 0; j < nb_dd; ++j) {
+			mb = rxep[j].mbuf;
+			pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
+				  rxq->crc_len;
+			mb->data_len = pkt_len;
+			mb->pkt_len = pkt_len;
+			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
+
+			/* convert descriptor fields to rte mbuf flags */
+			pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+					rxq->vlan_flags);
+			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+			pkt_flags |=
+				ngbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
+			mb->ol_flags = pkt_flags;
+			mb->packet_type =
+				ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
+				rxq->pkt_type_mask);
+
+			if (likely(pkt_flags & PKT_RX_RSS_HASH))
+				mb->hash.rss =
+					rte_le_to_cpu_32(rxdp[j].qw0.dw1);
+		}
+
+		/* Move mbuf pointers from the S/W ring to the stage */
+		for (j = 0; j < LOOK_AHEAD; ++j)
+			rxq->rx_stage[i + j] = rxep[j].mbuf;
+
+		/* stop if this group of descriptors was not completely done */
+		if (nb_dd != LOOK_AHEAD)
+			break;
+	}
+
+	/* clear software ring entries so we can cleanup correctly */
+	for (i = 0; i < nb_rx; ++i)
+		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+
+	return nb_rx;
+}
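
For example, if the scan finds the DD bit set on descriptors 0..4 of
an 8-descriptor group but clear on descriptor 5, nb_dd is 5: those
five mbufs are staged and the outer loop exits early, since the
packet in descriptor 5 has not completed yet.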
+
+static inline int
+ngbe_rx_alloc_bufs(struct ngbe_rx_queue *rxq, bool reset_mbuf)
+{
+	volatile struct ngbe_rx_desc *rxdp;
+	struct ngbe_rx_entry *rxep;
+	struct rte_mbuf *mb;
+	uint16_t alloc_idx;
+	__le64 dma_addr;
+	int diag, i;
+
+	/* allocate buffers in bulk directly into the S/W ring */
+	alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
+	rxep = &rxq->sw_ring[alloc_idx];
+	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
+				    rxq->rx_free_thresh);
+	if (unlikely(diag != 0))
+		return -ENOMEM;
+
+	rxdp = &rxq->rx_ring[alloc_idx];
+	for (i = 0; i < rxq->rx_free_thresh; ++i) {
+		/* populate the static rte mbuf fields */
+		mb = rxep[i].mbuf;
+		if (reset_mbuf)
+			mb->port = rxq->port_id;
+
+		rte_mbuf_refcnt_set(mb, 1);
+		mb->data_off = RTE_PKTMBUF_HEADROOM;
+
+		/* populate the descriptors */
+		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
+		NGBE_RXD_HDRADDR(&rxdp[i], 0);
+		NGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
+	}
+
+	/* update state of internal queue structure */
+	rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
+	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+		rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
+
+	/* no errors */
+	return 0;
+}
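
To illustrate the refill-trigger arithmetic above, a standalone
sketch (the descriptor count and threshold are example values, not
driver defaults):

	uint16_t nb_rx_desc = 512, rx_free_thresh = 32;
	uint16_t trigger = rx_free_thresh - 1;  /* 31: first refill point */

	/* Each refill repopulates the threshold-sized block starting at
	 * trigger - (rx_free_thresh - 1), then the trigger advances:
	 * 31 -> 63 -> 95 -> ... -> 511 -> 31, wrapping with the ring.
	 */
	trigger = trigger + rx_free_thresh;
	if (trigger >= nb_rx_desc)
		trigger = rx_free_thresh - 1;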
+
+static inline uint16_t
+ngbe_rx_fill_from_stage(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+			 uint16_t nb_pkts)
+{
+	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+	int i;
+
+	/* how many packets are ready to return? */
+	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+	/* copy mbuf pointers to the application's packet list */
+	for (i = 0; i < nb_pkts; ++i)
+		rx_pkts[i] = stage[i];
+
+	/* update internal queue state */
+	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+	return nb_pkts;
+}
+
+static inline uint16_t
+ngbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+	     uint16_t nb_pkts)
+{
+	struct ngbe_rx_queue *rxq = (struct ngbe_rx_queue *)rx_queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+	uint16_t nb_rx = 0;
+
+	/* Any previously recv'd pkts will be returned from the Rx stage */
+	if (rxq->rx_nb_avail)
+		return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+	/* Scan the H/W ring for packets to receive */
+	nb_rx = (uint16_t)ngbe_rx_scan_hw_ring(rxq);
+
+	/* update internal queue state */
+	rxq->rx_next_avail = 0;
+	rxq->rx_nb_avail = nb_rx;
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+	/* if required, allocate new buffers to replenish descriptors */
+	if (rxq->rx_tail > rxq->rx_free_trigger) {
+		uint16_t cur_free_trigger = rxq->rx_free_trigger;
+
+		if (ngbe_rx_alloc_bufs(rxq, true) != 0) {
+			int i, j;
+
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+				   "queue_id=%u", (uint16_t)rxq->port_id,
+				   (uint16_t)rxq->queue_id);
+
+			dev->data->rx_mbuf_alloc_failed +=
+				rxq->rx_free_thresh;
+
+			/*
+			 * Need to rewind any previous receives if we cannot
+			 * allocate new buffers to replenish the old ones.
+			 */
+			rxq->rx_nb_avail = 0;
+			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+			for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
+				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+			return 0;
+		}
+
+		/* update tail pointer */
+		rte_wmb();
+		ngbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
+	}
+
+	if (rxq->rx_tail >= rxq->nb_rx_desc)
+		rxq->rx_tail = 0;
+
+	/* received any packets this loop? */
+	if (rxq->rx_nb_avail)
+		return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+	return 0;
+}
+
+/* split requests into chunks of size RTE_PMD_NGBE_RX_MAX_BURST */
+uint16_t
+ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+			   uint16_t nb_pkts)
+{
+	uint16_t nb_rx;
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (likely(nb_pkts <= RTE_PMD_NGBE_RX_MAX_BURST))
+		return ngbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+	/* request is relatively large, chunk it up */
+	nb_rx = 0;
+	while (nb_pkts) {
+		uint16_t ret, n;
+
+		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_RX_MAX_BURST);
+		ret = ngbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+		nb_rx = (uint16_t)(nb_rx + ret);
+		nb_pkts = (uint16_t)(nb_pkts - ret);
+		if (ret < n)
+			break;
+	}
+
+	return nb_rx;
+}
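
The chunking is invisible to callers; a hypothetical caller (port
and queue ids assumed) still issues one burst call of any size:

	uint16_t port_id = 0, queue_id = 0;  /* hypothetical ids */
	struct rte_mbuf *pkts[256];          /* > RTE_PMD_NGBE_RX_MAX_BURST */
	uint16_t nb;

	/* Served internally in chunks of at most RTE_PMD_NGBE_RX_MAX_BURST;
	 * a short chunk ends the loop, so nb may be less than 256.
	 */
	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 256);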
+
 uint16_t
 ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
@@ -501,6 +752,288 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
+/**
+ * ngbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
+ *
+ * Fill the following info in the HEAD buffer of the Rx cluster:
+ *    - RX port identifier
+ *    - hardware offload data, if any:
+ *      - RSS flag & hash
+ *      - IP checksum flag
+ *      - VLAN TCI, if any
+ *      - error flags
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @rxq Pointer to the Rx queue
+ * @staterr Status/error word read from @desc
+ */
+static inline void
+ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
+		struct ngbe_rx_queue *rxq, uint32_t staterr)
+{
+	uint32_t pkt_info;
+	uint64_t pkt_flags;
+
+	head->port = rxq->port_id;
+
+	/* The vlan_tci field is only valid when PKT_RX_VLAN is
+	 * set in the pkt_flags field.
+	 */
+	head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
+	pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
+	pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
+	pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+	pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
+	head->ol_flags = pkt_flags;
+	head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
+						rxq->pkt_type_mask);
+
+	if (likely(pkt_flags & PKT_RX_RSS_HASH))
+		head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
+}
+
+/**
+ * ngbe_recv_pkts_sc - receive handler for scatter case.
+ *
+ * @rx_queue Rx queue handle
+ * @rx_pkts table of received packets
+ * @nb_pkts size of rx_pkts table
+ * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
+ *
+ * Returns the number of received packets/clusters (according to the "bulk
+ * receive" interface).
+ */
+static inline uint16_t
+ngbe_recv_pkts_sc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+		    bool bulk_alloc)
+{
+	struct ngbe_rx_queue *rxq = rx_queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+	volatile struct ngbe_rx_desc *rx_ring = rxq->rx_ring;
+	struct ngbe_rx_entry *sw_ring = rxq->sw_ring;
+	struct ngbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
+	uint16_t rx_id = rxq->rx_tail;
+	uint16_t nb_rx = 0;
+	uint16_t nb_hold = rxq->nb_rx_hold;
+	uint16_t prev_id = rxq->rx_tail;
+
+	while (nb_rx < nb_pkts) {
+		bool eop;
+		struct ngbe_rx_entry *rxe;
+		struct ngbe_scattered_rx_entry *sc_entry;
+		struct ngbe_scattered_rx_entry *next_sc_entry = NULL;
+		struct ngbe_rx_entry *next_rxe = NULL;
+		struct rte_mbuf *first_seg;
+		struct rte_mbuf *rxm;
+		struct rte_mbuf *nmb = NULL;
+		struct ngbe_rx_desc rxd;
+		uint16_t data_len;
+		uint16_t next_id;
+		volatile struct ngbe_rx_desc *rxdp;
+		uint32_t staterr;
+
+next_desc:
+		rxdp = &rx_ring[rx_id];
+		staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
+
+		if (!(staterr & NGBE_RXD_STAT_DD))
+			break;
+
+		rxd = *rxdp;
+
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+				  "staterr=0x%x data_len=%u",
+			   rxq->port_id, rxq->queue_id, rx_id, staterr,
+			   rte_le_to_cpu_16(rxd.qw1.hi.len));
+
+		if (!bulk_alloc) {
+			nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+			if (nmb == NULL) {
+				PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
+						  "port_id=%u queue_id=%u",
+					   rxq->port_id, rxq->queue_id);
+
+				dev->data->rx_mbuf_alloc_failed++;
+				break;
+			}
+		} else if (nb_hold > rxq->rx_free_thresh) {
+			uint16_t next_rdt = rxq->rx_free_trigger;
+
+			if (!ngbe_rx_alloc_bufs(rxq, false)) {
+				rte_wmb();
+				ngbe_set32_relaxed(rxq->rdt_reg_addr,
+							    next_rdt);
+				nb_hold -= rxq->rx_free_thresh;
+			} else {
+				PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
+						  "port_id=%u queue_id=%u",
+					   rxq->port_id, rxq->queue_id);
+
+				dev->data->rx_mbuf_alloc_failed++;
+				break;
+			}
+		}
+
+		nb_hold++;
+		rxe = &sw_ring[rx_id];
+		eop = staterr & NGBE_RXD_STAT_EOP;
+
+		next_id = rx_id + 1;
+		if (next_id == rxq->nb_rx_desc)
+			next_id = 0;
+
+		/* Prefetch next mbuf while processing current one. */
+		rte_ngbe_prefetch(sw_ring[next_id].mbuf);
+
+		/*
+		 * When next RX descriptor is on a cache-line boundary,
+		 * prefetch the next 4 RX descriptors and the next 4 pointers
+		 * to mbufs.
+		 */
+		if ((next_id & 0x3) == 0) {
+			rte_ngbe_prefetch(&rx_ring[next_id]);
+			rte_ngbe_prefetch(&sw_ring[next_id]);
+		}
+
+		rxm = rxe->mbuf;
+
+		if (!bulk_alloc) {
+			__le64 dma =
+			  rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+			/*
+			 * Update RX descriptor with the physical address of the
+			 * new data buffer of the newly allocated mbuf.
+			 */
+			rxe->mbuf = nmb;
+
+			rxm->data_off = RTE_PKTMBUF_HEADROOM;
+			NGBE_RXD_HDRADDR(rxdp, 0);
+			NGBE_RXD_PKTADDR(rxdp, dma);
+		} else {
+			rxe->mbuf = NULL;
+		}
+
+		/* Set the data length of the current segment. */
+		data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
+		rxm->data_len = data_len;
+
+		if (!eop) {
+			uint16_t nextp_id;
+
+			nextp_id = next_id;
+			next_sc_entry = &sw_sc_ring[nextp_id];
+			next_rxe = &sw_ring[nextp_id];
+			rte_ngbe_prefetch(next_rxe);
+		}
+
+		sc_entry = &sw_sc_ring[rx_id];
+		first_seg = sc_entry->fbuf;
+		sc_entry->fbuf = NULL;
+
+		/*
+		 * If this is the first buffer of the received packet,
+		 * set the pointer to the first mbuf of the packet and
+		 * initialize its context.
+		 * Otherwise, update the total length and the number of segments
+		 * of the current scattered packet, and update the pointer to
+		 * the last mbuf of the current packet.
+		 */
+		if (first_seg == NULL) {
+			first_seg = rxm;
+			first_seg->pkt_len = data_len;
+			first_seg->nb_segs = 1;
+		} else {
+			first_seg->pkt_len += data_len;
+			first_seg->nb_segs++;
+		}
+
+		prev_id = rx_id;
+		rx_id = next_id;
+
+		/*
+		 * If this is not the last buffer of the received packet, update
+		 * the pointer to the first mbuf at the NEXTP entry in the
+		 * sw_sc_ring and continue to parse the RX ring.
+		 */
+		if (!eop && next_rxe) {
+			rxm->next = next_rxe->mbuf;
+			next_sc_entry->fbuf = first_seg;
+			goto next_desc;
+		}
+
+		/* Initialize the first mbuf of the returned packet */
+		ngbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
+
+		/* Deal with the case when HW CRC stripping is disabled. */
+		first_seg->pkt_len -= rxq->crc_len;
+		if (unlikely(rxm->data_len <= rxq->crc_len)) {
+			struct rte_mbuf *lp;
+
+			for (lp = first_seg; lp->next != rxm; lp = lp->next)
+				;
+
+			first_seg->nb_segs--;
+			lp->data_len -= rxq->crc_len - rxm->data_len;
+			lp->next = NULL;
+			rte_pktmbuf_free_seg(rxm);
+		} else {
+			rxm->data_len -= rxq->crc_len;
+		}
+
+		/* Prefetch data of first segment, if configured to do so. */
+		rte_packet_prefetch((char *)first_seg->buf_addr +
+			first_seg->data_off);
+
+		/*
+		 * Store the mbuf address into the next entry of the array
+		 * of returned packets.
+		 */
+		rx_pkts[nb_rx++] = first_seg;
+	}
+
+	/*
+	 * Record index of the next RX descriptor to probe.
+	 */
+	rxq->rx_tail = rx_id;
+
+	/*
+	 * If the number of free RX descriptors is greater than the RX free
+	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+	 * register.
+	 * Update the RDT with the value of the last processed RX descriptor
+	 * minus 1, to guarantee that the RDT register is never equal to the
+	 * RDH register, which creates a "full" ring situation from the
+	 * hardware point of view...
+	 */
+	if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+			   "nb_hold=%u nb_rx=%u",
+			   rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
+
+		rte_wmb();
+		ngbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
+		nb_hold = 0;
+	}
+
+	rxq->nb_rx_hold = nb_hold;
+	return nb_rx;
+}
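
As a worked example of the CRC handling above: with crc_len = 4 and
HW CRC stripping disabled, a packet whose final segment holds only
2 bytes contains nothing but CRC in that segment, so that mbuf is
freed, nb_segs is decremented, and the previous segment is trimmed
by the remaining 4 - 2 = 2 bytes.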
+
+uint16_t
+ngbe_recv_pkts_sc_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+				 uint16_t nb_pkts)
+{
+	return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
+{
+	return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, true);
+}
+
 /*********************************************************************
  *
  *  Queue management functions
@@ -1064,6 +1597,54 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
+void __rte_cold
+ngbe_set_rx_function(struct rte_eth_dev *dev)
+{
+	struct ngbe_adapter *adapter = NGBE_DEV_ADAPTER(dev);
+
+	if (dev->data->scattered_rx) {
+		/*
+		 * Set the scattered callback: there are bulk and
+		 * single allocation versions.
+		 */
+		if (adapter->rx_bulk_alloc_allowed) {
+			PMD_INIT_LOG(DEBUG, "Using scattered Rx callback with "
+					    "bulk allocation (port=%d).",
+				     dev->data->port_id);
+			dev->rx_pkt_burst = ngbe_recv_pkts_sc_bulk_alloc;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
+					    "single allocation) "
+					    "Scattered Rx callback "
+					    "(port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = ngbe_recv_pkts_sc_single_alloc;
+		}
+	/*
+	 * Below we set "simple" callbacks according to port/queue parameters.
+	 * If parameters allow, we choose between the following callbacks:
+	 *    - Bulk Allocation
+	 *    - Single buffer allocation (the simplest one)
+	 */
+	} else if (adapter->rx_bulk_alloc_allowed) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+				    "satisfied. Rx Burst Bulk Alloc function "
+				    "will be used on port=%d.",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = ngbe_recv_pkts_bulk_alloc;
+	} else {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+				    "satisfied, or Scattered Rx is requested "
+				    "(port=%d).",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = ngbe_recv_pkts;
+	}
+}
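
Note that dev->data->scattered_rx itself is set outside this
function. A sketch of the usual PMD-side condition, mirroring other
DPDK drivers (an assumption here, since that check is not part of
this hunk):

	/* Hypothetical check, typically made during Rx init or start. */
	if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_frame_size > rx_buf_size)
		dev->data->scattered_rx = 1;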
+
 /*
  * Initializes Receive Unit.
  */
@@ -1211,6 +1792,8 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
 		wr32(hw, NGBE_SECRXCTL, rdrxctl);
 	}
 
+	ngbe_set_rx_function(dev);
+
 	return 0;
 }
 
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
index d6b9127cb4..4b8596b24a 100644
--- a/drivers/net/ngbe/ngbe_rxtx.h
+++ b/drivers/net/ngbe/ngbe_rxtx.h
@@ -298,6 +298,8 @@ struct ngbe_txq_ops {
 	void (*reset)(struct ngbe_tx_queue *txq);
 };
 
+void ngbe_set_rx_function(struct rte_eth_dev *dev);
+
 uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ngbe_get_rx_port_offloads(struct rte_eth_dev *dev);
-- 
2.27.0

Thread overview: 51+ messages
2021-06-02  9:40 [dpdk-dev] [PATCH v5 00/24] net: ngbe PMD Jiawen Wu
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 01/24] net/ngbe: add build and doc infrastructure Jiawen Wu
2021-06-14 17:05   ` Andrew Rybchenko
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 02/24] net/ngbe: add device IDs Jiawen Wu
2021-06-14 17:08   ` Andrew Rybchenko
2021-06-15  2:52     ` Jiawen Wu
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 03/24] net/ngbe: support probe and remove Jiawen Wu
2021-06-14 17:27   ` Andrew Rybchenko
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 04/24] net/ngbe: add device init and uninit Jiawen Wu
2021-06-14 17:36   ` Andrew Rybchenko
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 05/24] net/ngbe: add log type and error type Jiawen Wu
2021-06-14 17:54   ` Andrew Rybchenko
2021-06-15  7:13     ` Jiawen Wu
2021-07-01 13:57   ` David Marchand
2021-07-02  2:08     ` Jiawen Wu
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 06/24] net/ngbe: define registers Jiawen Wu
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 07/24] net/ngbe: set MAC type and LAN id Jiawen Wu
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 08/24] net/ngbe: init and validate EEPROM Jiawen Wu
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 09/24] net/ngbe: add HW initialization Jiawen Wu
2021-06-14 18:01   ` Andrew Rybchenko
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 10/24] net/ngbe: identify PHY and reset PHY Jiawen Wu
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 11/24] net/ngbe: store MAC address Jiawen Wu
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 12/24] net/ngbe: add info get operation Jiawen Wu
2021-06-14 18:13   ` Andrew Rybchenko
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 13/24] net/ngbe: support link update Jiawen Wu
2021-06-14 18:45   ` Andrew Rybchenko
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 14/24] net/ngbe: setup the check PHY link Jiawen Wu
2021-06-02  9:40 ` [dpdk-dev] [PATCH v5 15/24] net/ngbe: add Rx queue setup and release Jiawen Wu
2021-06-14 18:53   ` Andrew Rybchenko
2021-06-15  7:50     ` Jiawen Wu
2021-06-15  8:06       ` Andrew Rybchenko
2021-06-02  9:41 ` [dpdk-dev] [PATCH v5 16/24] net/ngbe: add Tx " Jiawen Wu
2021-06-14 18:59   ` Andrew Rybchenko
2021-06-02  9:41 ` [dpdk-dev] [PATCH v5 17/24] net/ngbe: add Rx and Tx init Jiawen Wu
2021-06-14 19:01   ` Andrew Rybchenko
2021-06-02  9:41 ` [dpdk-dev] [PATCH v5 18/24] net/ngbe: add packet type Jiawen Wu
2021-06-14 19:06   ` Andrew Rybchenko
2021-06-02  9:41 ` [dpdk-dev] [PATCH v5 19/24] net/ngbe: add simple Rx and Tx flow Jiawen Wu
2021-06-14 19:10   ` Andrew Rybchenko
2021-06-02  9:41 ` Jiawen Wu [this message]
2021-06-14 19:17   ` [dpdk-dev] [PATCH v5 20/24] net/ngbe: support bulk and scatter Rx Andrew Rybchenko
2021-06-02  9:41 ` [dpdk-dev] [PATCH v5 21/24] net/ngbe: support full-featured Tx path Jiawen Wu
2021-06-14 19:22   ` Andrew Rybchenko
2021-06-14 19:23     ` Andrew Rybchenko
2021-06-02  9:41 ` [dpdk-dev] [PATCH v5 22/24] net/ngbe: add device start operation Jiawen Wu
2021-06-14 19:33   ` Andrew Rybchenko
2021-06-02  9:41 ` [dpdk-dev] [PATCH v5 23/24] net/ngbe: start and stop RxTx Jiawen Wu
2021-06-14 20:44   ` Andrew Rybchenko
2021-06-02  9:41 ` [dpdk-dev] [PATCH v5 24/24] net/ngbe: add device stop operation Jiawen Wu
2021-06-11  1:38 ` [dpdk-dev] [PATCH v5 00/24] net: ngbe PMD Jiawen Wu
2021-06-14 20:56 ` Andrew Rybchenko
