From: Michal Krawczyk <mk@semihalf.com>
To: dev@dpdk.org
Cc: mw@semihalf.com, mba@semihalf.com, gtzalik@amazon.com,
evgenys@amazon.com, igorch@amazon.com,
Michal Krawczyk <mk@semihalf.com>
Subject: [dpdk-dev] [PATCH v2 21/29] net/ena: refactor Rx path
Date: Wed, 1 Apr 2020 16:21:19 +0200 [thread overview]
Message-ID: <20200401142127.13715-22-mk@semihalf.com> (raw)
In-Reply-To: <20200401142127.13715-1-mk@semihalf.com>
* Split the main Rx function into multiple ones - the body of the main
function was very big and also contained 2 nested loops, which made
the code hard to read
* Rework how the Rx mbuf chains are being created - instead of having
a while loop with a conditional check for the first segment, handle
the first segment outside the loop and, if more fragments exist,
process them inside the loop.
* Initialize the Rx mbuf using a simple helper function - this logic is
common to the 1st and all subsequent segments.
* Create a structure for the Rx buffer to align it with the Tx path,
other ENA drivers, and to make the variable name more descriptive -
on DPDK, the Rx buffer must hold only the mbuf, so initially an array
of mbufs was used as the buffers. However, it was misleading, as it
was named "rx_buffer_info". To make it clearer, a structure holding
the mbuf pointer was added, and now it is possible to expand it in
the future without reworking the driver.
* Remove redundant variables and conditional checks.
Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Guy Tzalik <gtzalik@amazon.com>
---
drivers/net/ena/ena_ethdev.c | 182 ++++++++++++++++++++++-------------
drivers/net/ena/ena_ethdev.h | 8 +-
2 files changed, 124 insertions(+), 66 deletions(-)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 64c91531a7..1a9c28122b 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -188,6 +188,12 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
+static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
+static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
+ struct ena_com_rx_buf_info *ena_bufs,
+ uint32_t descs,
+ uint16_t *next_to_clean,
+ uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
@@ -749,11 +755,13 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
unsigned int i;
- for (i = 0; i < ring->ring_size; ++i)
- if (ring->rx_buffer_info[i]) {
- rte_mbuf_raw_free(ring->rx_buffer_info[i]);
- ring->rx_buffer_info[i] = NULL;
+ for (i = 0; i < ring->ring_size; ++i) {
+ struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
+ if (rx_info->mbuf) {
+ rte_mbuf_raw_free(rx_info->mbuf);
+ rx_info->mbuf = NULL;
}
+ }
}
static void ena_tx_queue_release_bufs(struct ena_ring *ring)
@@ -1365,8 +1373,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
rxq->mb_pool = mp;
rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
- sizeof(struct rte_mbuf *) * nb_desc,
- RTE_CACHE_LINE_SIZE);
+ sizeof(struct ena_rx_buffer) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
if (!rxq->rx_buffer_info) {
PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n");
return -ENOMEM;
@@ -1434,15 +1442,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
uint16_t next_to_use_masked = next_to_use & ring_mask;
struct rte_mbuf *mbuf = mbufs[i];
struct ena_com_buf ebuf;
+ struct ena_rx_buffer *rx_info;
if (likely((i + 4) < count))
rte_prefetch0(mbufs[i + 4]);
req_id = rxq->empty_rx_reqs[next_to_use_masked];
rc = validate_rx_req_id(rxq, req_id);
- if (unlikely(rc < 0))
+ if (unlikely(rc))
break;
- rxq->rx_buffer_info[req_id] = mbuf;
+
+ rx_info = &rxq->rx_buffer_info[req_id];
/* prepare physical address for DMA transaction */
ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
@@ -1452,9 +1462,9 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
&ebuf, req_id);
if (unlikely(rc)) {
PMD_DRV_LOG(WARNING, "failed adding rx desc\n");
- rxq->rx_buffer_info[req_id] = NULL;
break;
}
+ rx_info->mbuf = mbuf;
next_to_use++;
}
@@ -2052,6 +2062,83 @@ static int ena_infos_get(struct rte_eth_dev *dev,
return 0;
}
+static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
+{
+ mbuf->data_len = len;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->refcnt = 1;
+ mbuf->next = NULL;
+}
+
+static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
+ struct ena_com_rx_buf_info *ena_bufs,
+ uint32_t descs,
+ uint16_t *next_to_clean,
+ uint8_t offset)
+{
+ struct rte_mbuf *mbuf;
+ struct rte_mbuf *mbuf_head;
+ struct ena_rx_buffer *rx_info;
+ unsigned int ring_mask = rx_ring->ring_size - 1;
+ uint16_t ntc, len, req_id, buf = 0;
+
+ if (unlikely(descs == 0))
+ return NULL;
+
+ ntc = *next_to_clean;
+
+ len = ena_bufs[buf].len;
+ req_id = ena_bufs[buf].req_id;
+ if (unlikely(validate_rx_req_id(rx_ring, req_id)))
+ return NULL;
+
+ rx_info = &rx_ring->rx_buffer_info[req_id];
+
+ mbuf = rx_info->mbuf;
+ RTE_ASSERT(mbuf != NULL);
+
+ ena_init_rx_mbuf(mbuf, len);
+
+ /* Fill the mbuf head with the data specific for 1st segment. */
+ mbuf_head = mbuf;
+ mbuf_head->nb_segs = descs;
+ mbuf_head->port = rx_ring->port_id;
+ mbuf_head->pkt_len = len;
+ mbuf_head->data_off += offset;
+
+ rx_info->mbuf = NULL;
+ rx_ring->empty_rx_reqs[ntc & ring_mask] = req_id;
+ ++ntc;
+
+ while (--descs) {
+ ++buf;
+ len = ena_bufs[buf].len;
+ req_id = ena_bufs[buf].req_id;
+ if (unlikely(validate_rx_req_id(rx_ring, req_id))) {
+ rte_mbuf_raw_free(mbuf_head);
+ return NULL;
+ }
+
+ rx_info = &rx_ring->rx_buffer_info[req_id];
+ RTE_ASSERT(rx_info->mbuf != NULL);
+
+ /* Create an mbuf chain. */
+ mbuf->next = rx_info->mbuf;
+ mbuf = mbuf->next;
+
+ ena_init_rx_mbuf(mbuf, len);
+ mbuf_head->pkt_len += len;
+
+ rx_info->mbuf = NULL;
+ rx_ring->empty_rx_reqs[ntc & ring_mask] = req_id;
+ ++ntc;
+ }
+
+ *next_to_clean = ntc;
+
+ return mbuf_head;
+}
+
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
@@ -2060,16 +2147,10 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
unsigned int ring_mask = ring_size - 1;
uint16_t next_to_clean = rx_ring->next_to_clean;
uint16_t desc_in_use = 0;
- uint16_t req_id;
- unsigned int recv_idx = 0;
- struct rte_mbuf *mbuf = NULL;
- struct rte_mbuf *mbuf_head = NULL;
- struct rte_mbuf *mbuf_prev = NULL;
- struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
- unsigned int completed;
-
+ struct rte_mbuf *mbuf;
+ uint16_t completed;
struct ena_com_rx_ctx ena_rx_ctx;
- int rc = 0;
+ int i, rc = 0;
/* Check adapter state */
if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
@@ -2083,8 +2164,6 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_pkts = desc_in_use;
for (completed = 0; completed < nb_pkts; completed++) {
- int segments = 0;
-
ena_rx_ctx.max_bufs = rx_ring->sgl_size;
ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
ena_rx_ctx.descs = 0;
@@ -2102,63 +2181,36 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return 0;
}
- if (unlikely(ena_rx_ctx.descs == 0))
- break;
-
- while (segments < ena_rx_ctx.descs) {
- req_id = ena_rx_ctx.ena_bufs[segments].req_id;
- rc = validate_rx_req_id(rx_ring, req_id);
- if (unlikely(rc)) {
- if (segments != 0)
- rte_mbuf_raw_free(mbuf_head);
- break;
- }
-
- mbuf = rx_buff_info[req_id];
- rx_buff_info[req_id] = NULL;
- mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
- mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- mbuf->refcnt = 1;
- mbuf->next = NULL;
- if (unlikely(segments == 0)) {
- mbuf->nb_segs = ena_rx_ctx.descs;
- mbuf->port = rx_ring->port_id;
- mbuf->pkt_len = 0;
- mbuf->data_off += ena_rx_ctx.pkt_offset;
- mbuf_head = mbuf;
- } else {
- /* for multi-segment pkts create mbuf chain */
- mbuf_prev->next = mbuf;
+ mbuf = ena_rx_mbuf(rx_ring,
+ ena_rx_ctx.ena_bufs,
+ ena_rx_ctx.descs,
+ &next_to_clean,
+ ena_rx_ctx.pkt_offset);
+ if (unlikely(mbuf == NULL)) {
+ for (i = 0; i < ena_rx_ctx.descs; ++i) {
+ rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
+ rx_ring->ena_bufs[i].req_id;
+ ++next_to_clean;
}
- mbuf_head->pkt_len += mbuf->data_len;
-
- mbuf_prev = mbuf;
- rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
- req_id;
- segments++;
- next_to_clean++;
- }
- if (unlikely(rc))
break;
+ }
/* fill mbuf attributes if any */
- ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
+ ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx);
- if (unlikely(mbuf_head->ol_flags &
+ if (unlikely(mbuf->ol_flags &
(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
++rx_ring->rx_stats.bad_csum;
}
- mbuf_head->hash.rss = ena_rx_ctx.hash;
+ mbuf->hash.rss = ena_rx_ctx.hash;
- /* pass to DPDK application head mbuf */
- rx_pkts[recv_idx] = mbuf_head;
- recv_idx++;
- rx_ring->rx_stats.bytes += mbuf_head->pkt_len;
+ rx_pkts[completed] = mbuf;
+ rx_ring->rx_stats.bytes += mbuf->pkt_len;
}
- rx_ring->rx_stats.cnt += recv_idx;
+ rx_ring->rx_stats.cnt += completed;
rx_ring->next_to_clean = next_to_clean;
desc_in_use = desc_in_use - completed + 1;
@@ -2168,7 +2220,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
}
- return recv_idx;
+ return completed;
}
static uint16_t
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 99191ac3e7..9df34136da 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -44,6 +44,12 @@ struct ena_tx_buffer {
struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};
+/* Rx buffer holds only pointer to the mbuf - may be expanded in the future */
+struct ena_rx_buffer {
+ struct rte_mbuf *mbuf;
+ struct ena_com_buf ena_buf;
+};
+
struct ena_calc_queue_size_ctx {
struct ena_com_dev_get_features_ctx *get_feat_ctx;
struct ena_com_dev *ena_dev;
@@ -89,7 +95,7 @@ struct ena_ring {
union {
struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
- struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
+ struct ena_rx_buffer *rx_buffer_info; /* contex of rx packet */
};
struct rte_mbuf **rx_refill_buffer;
unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
--
2.20.1
next prev parent reply other threads:[~2020-04-01 14:25 UTC|newest]
Thread overview: 46+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-04-01 14:20 [dpdk-dev] [PATCH v2 00/29] Update ENA driver to v2.1.0 Michal Krawczyk
2020-04-01 14:20 ` [dpdk-dev] [PATCH v2 01/29] net/ena: check if size of buffer is at least 1400B Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 02/29] net/ena/base: make allocation macros thread-safe Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 03/29] net/ena/base: prevent allocation of 0-sized memory Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 04/29] net/ena/base: set default hash key Michal Krawczyk
2020-04-02 12:36 ` Ferruh Yigit
2020-04-03 11:10 ` Michał Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 05/29] net/ena/base: rework interrupt moderation Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 06/29] net/ena/base: remove extra properties strings Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 07/29] net/ena/base: add accelerated LLQ mode Michal Krawczyk
2020-04-02 12:41 ` Ferruh Yigit
2020-04-03 11:15 ` Michał Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 08/29] net/ena/base: fix documentation of the functions Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 09/29] net/ena/base: fix indentation in cq polling Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 10/29] net/ena/base: add error logs when preparing Tx Michal Krawczyk
2020-04-02 12:53 ` Ferruh Yigit
2020-04-03 11:31 ` Michał Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 11/29] net/ena/base: use 48-bit memory addresses in ena_com Michal Krawczyk
2020-04-02 12:55 ` Ferruh Yigit
2020-04-03 12:14 ` Michał Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 12/29] net/ena/base: fix types for printing timestamps Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 13/29] net/ena/base: fix indentation of multiple defines Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 14/29] net/ena/base: update gen date and commit Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 15/29] net/ena: set IO ring size to the valid value Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 16/29] net/ena: refactor getting IO queues capabilities Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 17/29] net/ena: add support for large LLQ headers Michal Krawczyk
2020-04-02 13:02 ` Ferruh Yigit
2020-04-03 12:15 ` Michał Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 18/29] net/ena: remove memory barriers before doorbells Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 19/29] net/ena: add Tx drops statistic Michal Krawczyk
2020-04-02 13:05 ` Ferruh Yigit
2020-04-03 12:30 ` Michał Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 20/29] net/ena: disable meta caching Michal Krawczyk
2020-04-01 14:21 ` Michal Krawczyk [this message]
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 22/29] net/ena: rework getting number of available descs Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 23/29] net/ena: limit refill threshold by fixed value Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 24/29] net/ena: use macros for ring idx operations Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 25/29] net/ena: refactor Tx path Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 26/29] net/ena: reuse 0 length Rx descriptor Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 27/29] doc: add notes on ENA usage on metal instances Michal Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 28/29] net/ena: update copyright date Michal Krawczyk
2020-04-02 15:51 ` Ferruh Yigit
2020-04-03 12:31 ` Michał Krawczyk
2020-04-01 14:21 ` [dpdk-dev] [PATCH v2 29/29] net/ena: update version of the driver to v2.1.0 Michal Krawczyk
2020-04-02 15:53 ` Ferruh Yigit
2020-04-03 12:32 ` Michał Krawczyk
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200401142127.13715-22-mk@semihalf.com \
--to=mk@semihalf.com \
--cc=dev@dpdk.org \
--cc=evgenys@amazon.com \
--cc=gtzalik@amazon.com \
--cc=igorch@amazon.com \
--cc=mba@semihalf.com \
--cc=mw@semihalf.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).