From: Nikhil Rao <nikhil.rao@intel.com>
To: jerin.jacob@caviumnetworks.com
Cc: nikhil.rao@intel.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v4 3/5] eventdev: move Rx adapter eth Rx to separate function
Date: Mon, 2 Jul 2018 07:25:28 +0530
Message-ID: <1530496530-112764-4-git-send-email-nikhil.rao@intel.com>
In-Reply-To: <1530496530-112764-1-git-send-email-nikhil.rao@intel.com>
Create a separate function that handles ethernet receive and
enqueues the received packets to the event buffer. This function
will also be called for interrupt driven receive queues.
Signed-off-by: Nikhil Rao <nikhil.rao@intel.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
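Note for reviewers: the standalone sketch below is illustrative only. It
uses toy stand-in types and functions (toy_*), not the rte_ structures or
APIs, and simply mirrors the batching discipline the new rxa_eth_rx()
helper follows: poll only while the enqueue buffer has room for a full
BATCH_SIZE burst, flush once at least BATCH_SIZE events are buffered, and
stop once the caller's max_rx budget is exceeded. It compiles and runs on
its own for anyone who wants to step through the control flow.

#include <stdint.h>
#include <stdio.h>

#define BATCH_SIZE 32
#define TOY_EVENT_BUFFER_SIZE 128

/* Toy stand-in for the adapter's event enqueue buffer: only the fill
 * level matters for the control flow being illustrated.
 */
struct toy_enqueue_buffer {
	uint32_t count;
};

/* Stand-in for rte_eth_rx_burst(): pretend every poll returns a full batch. */
static uint16_t
toy_rx_burst(int *mbufs, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; i++)
		mbufs[i] = i;
	return n;
}

/* Stand-in for rxa_flush_event_buffer(): drain everything buffered so far. */
static void
toy_flush(struct toy_enqueue_buffer *buf)
{
	printf("flushed %u events\n", buf->count);
	buf->count = 0;
}

/* Same control flow as rxa_eth_rx(): space check, flush, burst, buffer. */
static uint32_t
toy_eth_rx(struct toy_enqueue_buffer *buf, uint32_t rx_count, uint32_t max_rx)
{
	int mbufs[BATCH_SIZE];
	uint16_t n;
	uint32_t nb_rx = 0;

	/* Don't poll unless a full batch fits in the enqueue buffer. */
	while (BATCH_SIZE <= TOY_EVENT_BUFFER_SIZE - buf->count) {
		if (buf->count >= BATCH_SIZE)
			toy_flush(buf);

		n = toy_rx_burst(mbufs, BATCH_SIZE);
		if (n == 0)
			break;
		buf->count += n;	/* stands in for rxa_buffer_mbufs() */
		nb_rx += n;
		if (rx_count + nb_rx > max_rx)
			break;
	}

	if (buf->count >= BATCH_SIZE)
		toy_flush(buf);

	return nb_rx;
}

int
main(void)
{
	struct toy_enqueue_buffer buf = { 0 };

	printf("received %u packets\n", toy_eth_rx(&buf, 0, 64));
	return 0;
}

Keeping the budget check and the final flush inside the helper is what lets
the same function be shared between the WRR poll loop here and the
interrupt driven receive path added later in this series.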
lib/librte_eventdev/rte_event_eth_rx_adapter.c | 67 ++++++++++++++++++--------
1 file changed, 47 insertions(+), 20 deletions(-)
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index 926f83a..8fe037f 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -616,6 +616,45 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
}
}
+/* Enqueue packets from <port, q> to event buffer */
+static inline uint32_t
+rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t port_id,
+ uint16_t queue_id,
+ uint32_t rx_count,
+ uint32_t max_rx)
+{
+ struct rte_mbuf *mbufs[BATCH_SIZE];
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ struct rte_event_eth_rx_adapter_stats *stats =
+ &rx_adapter->stats;
+ uint16_t n;
+ uint32_t nb_rx = 0;
+
+ /* Don't do a batch dequeue from the rx queue if there isn't
+ * enough space in the enqueue buffer.
+ */
+ while (BATCH_SIZE <= (RTE_DIM(buf->events) - buf->count)) {
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ stats->rx_poll_count++;
+ n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
+ if (unlikely(!n))
+ break;
+ rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
+ nb_rx += n;
+ if (rx_count + nb_rx > max_rx)
+ break;
+ }
+
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ return nb_rx;
+}
+
/*
* Polls receive queues added to the event adapter and enqueues received
* packets to the event device.
@@ -633,17 +672,16 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
{
uint32_t num_queue;
- uint16_t n;
uint32_t nb_rx = 0;
- struct rte_mbuf *mbufs[BATCH_SIZE];
struct rte_eth_event_enqueue_buffer *buf;
uint32_t wrr_pos;
uint32_t max_nb_rx;
+ struct rte_event_eth_rx_adapter_stats *stats;
wrr_pos = rx_adapter->wrr_pos;
max_nb_rx = rx_adapter->max_nb_rx;
buf = &rx_adapter->event_enqueue_buffer;
- struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
+ stats = &rx_adapter->stats;
/* Iterate through a WRR sequence */
for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
@@ -658,32 +696,21 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
rxa_flush_event_buffer(rx_adapter);
if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count)) {
rx_adapter->wrr_pos = wrr_pos;
- return;
+ break;
}
- stats->rx_poll_count++;
- n = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);
-
- if (n) {
- stats->rx_packets += n;
- /* The check before rte_eth_rx_burst() ensures that
- * all n mbufs can be buffered
- */
- rxa_buffer_mbufs(rx_adapter, d, qid, mbufs, n);
- nb_rx += n;
- if (nb_rx > max_nb_rx) {
- rx_adapter->wrr_pos =
+ nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx);
+ if (nb_rx > max_nb_rx) {
+ rx_adapter->wrr_pos =
(wrr_pos + 1) % rx_adapter->wrr_len;
- break;
- }
+ break;
}
if (++wrr_pos == rx_adapter->wrr_len)
wrr_pos = 0;
}
- if (buf->count >= BATCH_SIZE)
- rxa_flush_event_buffer(rx_adapter);
+ stats->rx_packets += nb_rx;
}
static int
--
1.8.3.1