DPDK patches and discussions
From: Michal Krawczyk <mk@semihalf.com>
To: dev@dpdk.org
Cc: mw@semihalf.com, mba@semihalf.com, gtzalik@amazon.com,
	evgenys@amazon.com, igorch@amazon.com, ferruh.yigit@intel.com,
	arybchenko@solarflare.com, Michal Krawczyk <mk@semihalf.com>
Subject: [dpdk-dev] [PATCH v3 27/30] net/ena: refactor Tx path
Date: Wed,  8 Apr 2020 10:29:18 +0200
Message-ID: <20200408082921.31000-28-mk@semihalf.com>
In-Reply-To: <20200408082921.31000-1-mk@semihalf.com>

The original Tx function was very long and contained both the cleanup
and the sending sections. Because of that, it had a lot of local
variables, deep indentation and was hard to read.

This function was split into 2 sections, as sketched below:
  * Sending - responsible for preparing the mbuf, mapping it to the
    device descriptors and, finally, sending the packet to the HW
  * Cleanup - releases the packets that were already sent by the HW.
    The loop which releases packets was reworked a bit to make the
    intention more visible and aligned with other parts of the driver.
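
After the split, the burst Tx function becomes a thin loop around these
two helpers. A simplified sketch of the resulting flow (illustrative
only - the exact implementation is in the diff below):

  static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                    uint16_t nb_pkts)
  {
          struct ena_ring *tx_ring = tx_queue;
          uint16_t sent_idx;

          /* Sending section: map each mbuf and post it to the device. */
          for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++)
                  if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
                          break;

          /* Ring the doorbell once for the whole burst. */
          if (sent_idx > 0)
                  ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);

          /* Cleanup section: release mbufs already processed by the HW. */
          ena_tx_cleanup(tx_ring);

          return sent_idx;
  }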

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Guy Tzalik <gtzalik@amazon.com>
---
v2:
  * Fix compilation error on icc by adding braces around 0
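
  For reference, the braces-around-0 fix refers to the zero initializer
  of the Tx context in ena_xmit_mbuf() (snippet taken from the diff
  below):

      struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };

  The inner braces zero-initialize the first member explicitly;
  presumably the plain "= { 0 }" form triggered a missing-braces
  diagnostic on icc because that member is itself a struct.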

 drivers/net/ena/ena_ethdev.c | 323 +++++++++++++++++++----------------
 1 file changed, 179 insertions(+), 144 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index f6d0a75819..1a7cc686f5 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -169,6 +169,13 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
 			   bool *wd_state);
 static int ena_dev_configure(struct rte_eth_dev *dev);
+static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
+	struct ena_tx_buffer *tx_info,
+	struct rte_mbuf *mbuf,
+	void **push_header,
+	uint16_t *header_len);
+static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
+static void ena_tx_cleanup(struct ena_ring *tx_ring);
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -2343,193 +2350,221 @@ static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
 	return rc;
 }
 
-static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-				  uint16_t nb_pkts)
+static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
+	struct ena_tx_buffer *tx_info,
+	struct rte_mbuf *mbuf,
+	void **push_header,
+	uint16_t *header_len)
 {
-	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
-	uint16_t next_to_use = tx_ring->next_to_use;
-	uint16_t next_to_clean = tx_ring->next_to_clean;
-	struct rte_mbuf *mbuf;
-	uint16_t seg_len;
-	unsigned int cleanup_budget;
-	struct ena_com_tx_ctx ena_tx_ctx;
-	struct ena_tx_buffer *tx_info;
-	struct ena_com_buf *ebuf;
-	uint16_t rc, req_id, total_tx_descs = 0;
-	uint16_t sent_idx = 0;
-	uint16_t push_len = 0;
-	uint16_t delta = 0;
-	int nb_hw_desc;
-	uint32_t total_length;
-
-	/* Check adapter state */
-	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
-		PMD_DRV_LOG(ALERT,
-			"Trying to xmit pkts while device is NOT running\n");
-		return 0;
-	}
+	struct ena_com_buf *ena_buf;
+	uint16_t delta, seg_len, push_len;
 
-	nb_pkts = RTE_MIN(ena_com_free_q_entries(tx_ring->ena_com_io_sq),
-		nb_pkts);
+	delta = 0;
+	seg_len = mbuf->data_len;
 
-	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
-		mbuf = tx_pkts[sent_idx];
-		total_length = 0;
+	tx_info->mbuf = mbuf;
+	ena_buf = tx_info->bufs;
 
-		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
-		if (unlikely(rc))
-			break;
+	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+		/*
+		 * Tx header might be (and will be in most cases) smaller than
+		 * tx_max_header_size. But it's not an issue to send more data
+		 * to the device, than actually needed if the mbuf size is
+		 * greater than tx_max_header_size.
+		 */
+		push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
+		*header_len = push_len;
 
-		req_id = tx_ring->empty_tx_reqs[next_to_use];
-		tx_info = &tx_ring->tx_buffer_info[req_id];
-		tx_info->mbuf = mbuf;
-		tx_info->num_of_bufs = 0;
-		ebuf = tx_info->bufs;
+		if (likely(push_len <= seg_len)) {
+			/* If the push header is in the single segment, then
+			 * just point it to the 1st mbuf data.
+			 */
+			*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
+		} else {
+			/* If the push header lays in the several segments, copy
+			 * it to the intermediate buffer.
+			 */
+			rte_pktmbuf_read(mbuf, 0, push_len,
+				tx_ring->push_buf_intermediate_buf);
+			*push_header = tx_ring->push_buf_intermediate_buf;
+			delta = push_len - seg_len;
+		}
+	} else {
+		*push_header = NULL;
+		*header_len = 0;
+		push_len = 0;
+	}
 
-		/* Prepare TX context */
-		memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
-		memset(&ena_tx_ctx.ena_meta, 0x0,
-		       sizeof(struct ena_com_tx_meta));
-		ena_tx_ctx.ena_bufs = ebuf;
-		ena_tx_ctx.req_id = req_id;
+	/* Process first segment taking into consideration pushed header */
+	if (seg_len > push_len) {
+		ena_buf->paddr = mbuf->buf_iova +
+				mbuf->data_off +
+				push_len;
+		ena_buf->len = seg_len - push_len;
+		ena_buf++;
+		tx_info->num_of_bufs++;
+	}
 
-		delta = 0;
+	while ((mbuf = mbuf->next) != NULL) {
 		seg_len = mbuf->data_len;
 
-		if (tx_ring->tx_mem_queue_type ==
-				ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-			push_len = RTE_MIN(mbuf->pkt_len,
-					   tx_ring->tx_max_header_size);
-			ena_tx_ctx.header_len = push_len;
-
-			if (likely(push_len <= seg_len)) {
-				/* If the push header is in the single segment,
-				 * then just point it to the 1st mbuf data.
-				 */
-				ena_tx_ctx.push_header =
-					rte_pktmbuf_mtod(mbuf, uint8_t *);
-			} else {
-				/* If the push header lays in the several
-				 * segments, copy it to the intermediate buffer.
-				 */
-				rte_pktmbuf_read(mbuf, 0, push_len,
-					tx_ring->push_buf_intermediate_buf);
-				ena_tx_ctx.push_header =
-					tx_ring->push_buf_intermediate_buf;
-				delta = push_len - seg_len;
-			}
-		} /* there's no else as we take advantage of memset zeroing */
+		/* Skip mbufs if whole data is pushed as a header */
+		if (unlikely(delta > seg_len)) {
+			delta -= seg_len;
+			continue;
+		}
 
-		/* Set TX offloads flags, if applicable */
-		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
-			tx_ring->disable_meta_caching);
+		ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
+		ena_buf->len = seg_len - delta;
+		ena_buf++;
+		tx_info->num_of_bufs++;
 
-		rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(
-			sent_idx, 4, tx_ring->size_mask)]);
+		delta = 0;
+	}
+}
 
-		/* Process first segment taking into
-		 * consideration pushed header
-		 */
-		if (seg_len > push_len) {
-			ebuf->paddr = mbuf->buf_iova +
-				      mbuf->data_off +
-				      push_len;
-			ebuf->len = seg_len - push_len;
-			ebuf++;
-			tx_info->num_of_bufs++;
-		}
-		total_length += mbuf->data_len;
+static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
+{
+	struct ena_tx_buffer *tx_info;
+	struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
+	uint16_t next_to_use;
+	uint16_t header_len;
+	uint16_t req_id;
+	void *push_header;
+	int nb_hw_desc;
+	int rc;
 
-		while ((mbuf = mbuf->next) != NULL) {
-			seg_len = mbuf->data_len;
+	rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+	if (unlikely(rc))
+		return rc;
 
-			/* Skip mbufs if whole data is pushed as a header */
-			if (unlikely(delta > seg_len)) {
-				delta -= seg_len;
-				continue;
-			}
+	next_to_use = tx_ring->next_to_use;
 
-			ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
-			ebuf->len = seg_len - delta;
-			total_length += ebuf->len;
-			ebuf++;
-			tx_info->num_of_bufs++;
+	req_id = tx_ring->empty_tx_reqs[next_to_use];
+	tx_info = &tx_ring->tx_buffer_info[req_id];
+	tx_info->num_of_bufs = 0;
 
-			delta = 0;
-		}
+	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
 
-		ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+	ena_tx_ctx.ena_bufs = tx_info->bufs;
+	ena_tx_ctx.push_header = push_header;
+	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+	ena_tx_ctx.req_id = req_id;
+	ena_tx_ctx.header_len = header_len;
 
-		if (ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
-					       &ena_tx_ctx)) {
-			PMD_DRV_LOG(DEBUG, "llq tx max burst size of queue %d"
-				" achieved, writing doorbell to send burst\n",
-				tx_ring->id);
-			ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
-		}
-
-		/* prepare the packet's descriptors to dma engine */
-		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
-					&ena_tx_ctx, &nb_hw_desc);
-		if (unlikely(rc)) {
-			++tx_ring->tx_stats.prepare_ctx_err;
-			break;
-		}
-		tx_info->tx_descs = nb_hw_desc;
+	/* Set Tx offloads flags, if applicable */
+	ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
+		tx_ring->disable_meta_caching);
 
-		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
-			tx_ring->size_mask);
-		tx_ring->tx_stats.cnt++;
-		tx_ring->tx_stats.bytes += total_length;
+	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
+			&ena_tx_ctx))) {
+		PMD_DRV_LOG(DEBUG,
+			"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+			tx_ring->id);
+		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
 	}
-	tx_ring->tx_stats.available_desc =
-		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
 
-	/* If there are ready packets to be xmitted... */
-	if (sent_idx > 0) {
-		/* ...let HW do its best :-) */
-		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
-		tx_ring->tx_stats.doorbells++;
-		tx_ring->next_to_use = next_to_use;
+	/* prepare the packet's descriptors to dma engine */
+	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,	&ena_tx_ctx,
+		&nb_hw_desc);
+	if (unlikely(rc)) {
+		++tx_ring->tx_stats.prepare_ctx_err;
+		return rc;
 	}
 
-	/* Clear complete packets  */
-	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
-		rc = validate_tx_req_id(tx_ring, req_id);
-		if (rc)
+	tx_info->tx_descs = nb_hw_desc;
+
+	tx_ring->tx_stats.cnt++;
+	tx_ring->tx_stats.bytes += mbuf->pkt_len;
+
+	tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
+		tx_ring->size_mask);
+
+	return 0;
+}
+
+static void ena_tx_cleanup(struct ena_ring *tx_ring)
+{
+	unsigned int cleanup_budget;
+	unsigned int total_tx_descs = 0;
+	uint16_t next_to_clean = tx_ring->next_to_clean;
+
+	cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
+		(unsigned int)ENA_REFILL_THRESH_PACKET);
+
+	while (likely(total_tx_descs < cleanup_budget)) {
+		struct rte_mbuf *mbuf;
+		struct ena_tx_buffer *tx_info;
+		uint16_t req_id;
+
+		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
+			break;
+
+		if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
 			break;
 
 		/* Get Tx info & store how many descs were processed  */
 		tx_info = &tx_ring->tx_buffer_info[req_id];
-		total_tx_descs += tx_info->tx_descs;
 
-		/* Free whole mbuf chain  */
 		mbuf = tx_info->mbuf;
 		rte_pktmbuf_free(mbuf);
+
 		tx_info->mbuf = NULL;
+		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
+
+		total_tx_descs += tx_info->tx_descs;
 
 		/* Put back descriptor to the ring for reuse */
-		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
 		next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
 			tx_ring->size_mask);
-		cleanup_budget =
-			RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
-			(unsigned int)ENA_REFILL_THRESH_PACKET);
-
-		/* If too many descs to clean, leave it for another run */
-		if (unlikely(total_tx_descs > cleanup_budget))
-			break;
 	}
-	tx_ring->tx_stats.available_desc =
-		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
 
-	if (total_tx_descs > 0) {
+	if (likely(total_tx_descs > 0)) {
 		/* acknowledge completion of sent packets */
 		tx_ring->next_to_clean = next_to_clean;
 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
 		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
 	}
+}
+
+static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+				  uint16_t nb_pkts)
+{
+	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+	uint16_t sent_idx = 0;
+
+	/* Check adapter state */
+	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
+		PMD_DRV_LOG(ALERT,
+			"Trying to xmit pkts while device is NOT running\n");
+		return 0;
+	}
+
+	nb_pkts = RTE_MIN(ena_com_free_q_entries(tx_ring->ena_com_io_sq),
+		nb_pkts);
+
+	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
+		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
+			break;
 
+		rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
+			tx_ring->size_mask)]);
+	}
+
+	tx_ring->tx_stats.available_desc =
+		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
+
+	/* If there are ready packets to be xmitted... */
+	if (sent_idx > 0) {
+		/* ...let HW do its best :-) */
+		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+		tx_ring->tx_stats.doorbells++;
+	}
+
+	ena_tx_cleanup(tx_ring);
+
+	tx_ring->tx_stats.available_desc =
+		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
 	tx_ring->tx_stats.tx_poll++;
 
 	return sent_idx;
-- 
2.20.1



Thread overview: 36+ messages
2020-04-08  8:28 [dpdk-dev] [PATCH v3 00/30] Update ENA driver to v2.1.0 Michal Krawczyk
2020-04-08  8:28 ` [dpdk-dev] [PATCH v3 01/30] net/ena: check if size of buffer is at least 1400B Michal Krawczyk
2020-04-08  8:28 ` [dpdk-dev] [PATCH v3 02/30] net/ena/base: make allocation macros thread-safe Michal Krawczyk
2020-04-08  8:28 ` [dpdk-dev] [PATCH v3 03/30] net/ena/base: prevent allocation of 0-sized memory Michal Krawczyk
2020-04-08  8:28 ` [dpdk-dev] [PATCH v3 04/30] net/ena/base: generate default, random RSS hash key Michal Krawczyk
2020-05-01 15:28   ` Ferruh Yigit
2020-05-01 22:56     ` David Christensen
2020-04-08  8:28 ` [dpdk-dev] [PATCH v3 05/30] net/ena/base: fix testing for supported hash func Michal Krawczyk
2020-04-08  8:28 ` [dpdk-dev] [PATCH v3 06/30] net/ena/base: remove conversion of the ind tbl Michal Krawczyk
2020-04-08  8:28 ` [dpdk-dev] [PATCH v3 07/30] net/ena/base: rework interrupt moderation Michal Krawczyk
2020-04-08  8:28 ` [dpdk-dev] [PATCH v3 08/30] net/ena/base: remove extra properties strings Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 09/30] net/ena/base: add accelerated LLQ mode Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 10/30] net/ena/base: fix documentation of the functions Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 11/30] net/ena/base: fix indentation in cq polling Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 12/30] net/ena/base: add error logs when preparing Tx Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 13/30] net/ena/base: use 48-bit memory addresses in ena_com Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 14/30] net/ena/base: fix types for printing timestamps Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 15/30] net/ena/base: fix indentation of multiple defines Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 16/30] net/ena/base: update gen date and commit Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 17/30] net/ena: set IO ring size to the valid value Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 18/30] net/ena: refactor getting IO queues capabilities Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 19/30] net/ena: add support for large LLQ headers Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 20/30] net/ena: remove memory barriers before doorbells Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 21/30] net/ena: add Tx drops statistic Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 22/30] net/ena: disable meta caching Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 23/30] net/ena: refactor Rx path Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 24/30] net/ena: rework getting number of available descs Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 25/30] net/ena: limit refill threshold by fixed value Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 26/30] net/ena: use macros for ring idx operations Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 27/30] net/ena: refactor Tx path Michal Krawczyk [this message]
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 28/30] net/ena: reuse 0 length Rx descriptor Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 29/30] doc: add notes on ENA usage on metal instances Michal Krawczyk
2020-04-08  8:29 ` [dpdk-dev] [PATCH v3 30/30] net/ena: update version of the driver to v2.1.0 Michal Krawczyk
2020-04-08 13:23 ` [dpdk-dev] [PATCH v3 00/30] Update ENA " Ferruh Yigit
2020-04-09 10:56   ` Michał Krawczyk
2020-04-09 11:40     ` Ferruh Yigit
