DPDK patches and discussions
 help / color / mirror / Atom feed
From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: olivier.matz@6wind.com, jerin.jacob@caviumnetworks.com,
	Bruce Richardson <bruce.richardson@intel.com>
Subject: [dpdk-dev] [PATCH v2 5/5] event/sw: change worker rings to standard event rings
Date: Fri, 30 Jun 2017 16:06:21 +0100	[thread overview]
Message-ID: <20170630150621.156365-6-bruce.richardson@intel.com> (raw)
In-Reply-To: <20170630150621.156365-1-bruce.richardson@intel.com>

Now that we have a standard event ring implementation for passing events
core-to-core, use that in place of the custom event rings in the software
eventdev.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/event/sw/sw_evdev.c           | 38 +++++++++++++++++++----------------
 drivers/event/sw/sw_evdev.h           |  4 ++--
 drivers/event/sw/sw_evdev_scheduler.c | 19 +++++++++---------
 drivers/event/sw/sw_evdev_worker.c    | 28 +++++++++++++++++++++-----
 drivers/event/sw/sw_evdev_xstats.c    | 15 +++++++-------
 5 files changed, 64 insertions(+), 40 deletions(-)

diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index fe2a61e2f..31880aa5c 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -38,10 +38,10 @@
 #include <rte_kvargs.h>
 #include <rte_ring.h>
 #include <rte_errno.h>
+#include <rte_event_ring.h>
 
 #include "sw_evdev.h"
 #include "iq_ring.h"
-#include "event_ring.h"
 
 #define EVENTDEV_NAME_SW_PMD event_sw
 #define NUMA_NODE_ARG "numa_node"
@@ -140,7 +140,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
 	struct sw_port *p = &sw->ports[port_id];
-	char buf[QE_RING_NAMESIZE];
+	char buf[RTE_RING_NAMESIZE];
 	unsigned int i;
 
 	struct rte_event_dev_info info;
@@ -161,10 +161,11 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	p->id = port_id;
 	p->sw = sw;
 
-	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
-			"rx_worker_ring");
-	p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
-			dev->data->socket_id);
+	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+			port_id, "rx_worker_ring");
+	p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+			dev->data->socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
 	if (p->rx_worker_ring == NULL) {
 		SW_LOG_ERR("Error creating RX worker ring for port %d\n",
 				port_id);
@@ -173,12 +174,13 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 
 	p->inflight_max = conf->new_event_threshold;
 
-	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
-			"cq_worker_ring");
-	p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
-			dev->data->socket_id);
+	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+			port_id, "cq_worker_ring");
+	p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
+			dev->data->socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
 	if (p->cq_worker_ring == NULL) {
-		qe_ring_destroy(p->rx_worker_ring);
+		rte_event_ring_free(p->rx_worker_ring);
 		SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
 				port_id);
 		return -1;
@@ -204,8 +206,8 @@ sw_port_release(void *port)
 	if (p == NULL)
 		return;
 
-	qe_ring_destroy(p->rx_worker_ring);
-	qe_ring_destroy(p->cq_worker_ring);
+	rte_event_ring_free(p->rx_worker_ring);
+	rte_event_ring_free(p->cq_worker_ring);
 	memset(p, 0, sizeof(*p));
 }
 
@@ -512,8 +514,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 		fprintf(f, "\n");
 
 		if (p->rx_worker_ring) {
-			uint64_t used = qe_ring_count(p->rx_worker_ring);
-			uint64_t space = qe_ring_free_count(p->rx_worker_ring);
+			uint64_t used = rte_event_ring_count(p->rx_worker_ring);
+			uint64_t space = rte_event_ring_free_count(
+					p->rx_worker_ring);
 			const char *col = (space == 0) ? COL_RED : COL_RESET;
 			fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
 					PRIu64 COL_RESET"\n", col, used, space);
@@ -521,8 +524,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 			fprintf(f, "\trx ring not initialized.\n");
 
 		if (p->cq_worker_ring) {
-			uint64_t used = qe_ring_count(p->cq_worker_ring);
-			uint64_t space = qe_ring_free_count(p->cq_worker_ring);
+			uint64_t used = rte_event_ring_count(p->cq_worker_ring);
+			uint64_t space = rte_event_ring_free_count(
+					p->cq_worker_ring);
 			const char *col = (space == 0) ? COL_RED : COL_RESET;
 			fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
 					PRIu64 COL_RESET"\n", col, used, space);
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 0d7f94f3b..6ef03ceb8 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -190,9 +190,9 @@ struct sw_port {
 	int16_t num_ordered_qids;
 
 	/** Ring and buffer for pulling events from workers for scheduling */
-	struct qe_ring *rx_worker_ring __rte_cache_aligned;
+	struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
 	/** Ring and buffer for pushing packets to workers after scheduling */
-	struct qe_ring *cq_worker_ring;
+	struct rte_event_ring *cq_worker_ring;
 
 	/* hole */
 
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index fe1551706..8a2c9d4f9 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -32,9 +32,9 @@
 
 #include <rte_ring.h>
 #include <rte_hash_crc.h>
+#include <rte_event_ring.h>
 #include "sw_evdev.h"
 #include "iq_ring.h"
-#include "event_ring.h"
 
 #define SW_IQS_MASK (SW_IQS_MAX-1)
 
@@ -123,8 +123,8 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 
 		/* if we just filled in the last slot, flush the buffer */
 		if (sw->cq_ring_space[cq] == 0) {
-			struct qe_ring *worker = p->cq_worker_ring;
-			qe_ring_enqueue_burst(worker, p->cq_buf,
+			struct rte_event_ring *worker = p->cq_worker_ring;
+			rte_event_ring_enqueue_burst(worker, p->cq_buf,
 					p->cq_buf_count,
 					&sw->cq_ring_space[cq]);
 			p->cq_buf_count = 0;
@@ -171,7 +171,8 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 			cq = qid->cq_map[cq_idx];
 			if (++cq_idx == qid->cq_num_mapped_cqs)
 				cq_idx = 0;
-		} while (qe_ring_free_count(sw->ports[cq].cq_worker_ring) == 0 ||
+		} while (rte_event_ring_free_count(
+				sw->ports[cq].cq_worker_ring) == 0 ||
 				sw->ports[cq].inflights == SW_PORT_HIST_LIST);
 
 		struct sw_port *p = &sw->ports[cq];
@@ -367,10 +368,10 @@ static __rte_always_inline void
 sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
 {
 	RTE_SET_USED(sw);
-	struct qe_ring *worker = port->rx_worker_ring;
+	struct rte_event_ring *worker = port->rx_worker_ring;
 	port->pp_buf_start = 0;
-	port->pp_buf_count = qe_ring_dequeue_burst(worker, port->pp_buf,
-			RTE_DIM(port->pp_buf));
+	port->pp_buf_count = rte_event_ring_dequeue_burst(worker, port->pp_buf,
+			RTE_DIM(port->pp_buf), NULL);
 }
 
 static __rte_always_inline uint32_t
@@ -586,8 +587,8 @@ sw_event_schedule(struct rte_eventdev *dev)
 	 * worker cores: aka, do the ring transfers batched.
 	 */
 	for (i = 0; i < sw->port_count; i++) {
-		struct qe_ring *worker = sw->ports[i].cq_worker_ring;
-		qe_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
+		struct rte_event_ring *worker = sw->ports[i].cq_worker_ring;
+		rte_event_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
 				sw->ports[i].cq_buf_count,
 				&sw->cq_ring_space[i]);
 		sw->ports[i].cq_buf_count = 0;
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index b738506ac..d76d3d5c8 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -32,9 +32,9 @@
 
 #include <rte_atomic.h>
 #include <rte_cycles.h>
+#include <rte_event_ring.h>
 
 #include "sw_evdev.h"
-#include "event_ring.h"
 
 #define PORT_ENQUEUE_MAX_BURST_SIZE 64
 
@@ -52,13 +52,31 @@ sw_event_release(struct sw_port *p, uint8_t index)
 	ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];
 
 	uint16_t free_count;
-	qe_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
+	rte_event_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
 
 	/* each release returns one credit */
 	p->outstanding_releases--;
 	p->inflight_credits++;
 }
 
+/*
+ * special-case of rte_event_ring enqueue, overriding the "op" member of
+ * each event as it is written to the ring.
+ */
+static inline unsigned int
+enqueue_burst_with_ops(struct rte_event_ring *r, const struct rte_event *events,
+		unsigned int n, uint8_t *ops)
+{
+	struct rte_event tmp_evs[PORT_ENQUEUE_MAX_BURST_SIZE];
+	unsigned int i;
+
+	memcpy(tmp_evs, events, n * sizeof(events[0]));
+	for (i = 0; i < n; i++)
+		tmp_evs[i].op = ops[i];
+
+	return rte_event_ring_enqueue_burst(r, tmp_evs, n, NULL);
+}
+
 uint16_t
 sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 {
@@ -119,7 +137,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 	p->inflight_credits -= forwards * p->is_directed;
 
 	/* returns number of events actually enqueued */
-	uint32_t enq = qe_ring_enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
+	uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
 					     new_ops);
 	if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
 		uint64_t burst_ticks = rte_get_timer_cycles() -
@@ -146,7 +164,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
 	RTE_SET_USED(wait);
 	struct sw_port *p = (void *)port;
 	struct sw_evdev *sw = (void *)p->sw;
-	struct qe_ring *ring = p->cq_worker_ring;
+	struct rte_event_ring *ring = p->cq_worker_ring;
 	uint32_t credit_update_quanta = sw->credit_update_quanta;
 
 	/* check that all previous dequeues have been released */
@@ -158,7 +176,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
 	}
 
 	/* returns number of events actually dequeued */
-	uint16_t ndeq = qe_ring_dequeue_burst(ring, ev, num);
+	uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
 	if (unlikely(ndeq == 0)) {
 		p->outstanding_releases = 0;
 		p->zero_polls++;
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index 7b66fbb15..8cb6d88d1 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -30,9 +30,9 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <rte_event_ring.h>
 #include "sw_evdev.h"
 #include "iq_ring.h"
-#include "event_ring.h"
 
 enum xstats_type {
 	/* common stats */
@@ -105,10 +105,10 @@ get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
 	case calls: return p->total_polls;
 	case credits: return p->inflight_credits;
 	case poll_return: return p->zero_polls;
-	case rx_used: return qe_ring_count(p->rx_worker_ring);
-	case rx_free: return qe_ring_free_count(p->rx_worker_ring);
-	case tx_used: return qe_ring_count(p->cq_worker_ring);
-	case tx_free: return qe_ring_free_count(p->cq_worker_ring);
+	case rx_used: return rte_event_ring_count(p->rx_worker_ring);
+	case rx_free: return rte_event_ring_free_count(p->rx_worker_ring);
+	case tx_used: return rte_event_ring_count(p->cq_worker_ring);
+	case tx_free: return rte_event_ring_free_count(p->cq_worker_ring);
 	default: return -1;
 	}
 }
@@ -318,8 +318,9 @@ sw_xstats_init(struct sw_evdev *sw)
 					port, port_stats[i]);
 		}
 
-		for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
-				SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
+		for (bkt = 0; bkt < (rte_event_ring_get_capacity(
+				sw->ports[port].cq_worker_ring) >>
+					SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
 			for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
 				sw->xstats[stat] = (struct sw_xstats_entry){
 					.fn = get_port_bucket_stat,
-- 
2.13.0

  parent reply	other threads:[~2017-06-30 15:06 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-06-07 13:36 [dpdk-dev] [PATCH 0/5] create event rings type Bruce Richardson
2017-06-07 13:36 ` [dpdk-dev] [PATCH 1/5] ring: allow rings with non power-of-2 sizes Bruce Richardson
2017-06-30  9:40   ` Olivier Matz
2017-06-30 11:32     ` Bruce Richardson
2017-06-30 12:24       ` Olivier Matz
2017-06-30 13:59         ` Bruce Richardson
2017-06-07 13:36 ` [dpdk-dev] [PATCH 2/5] test/test: add unit tests for exact size rings Bruce Richardson
2017-06-30  9:42   ` Olivier Matz
2017-06-07 13:36 ` [dpdk-dev] [PATCH 3/5] eventdev: add ring structure for events Bruce Richardson
2017-06-12  5:15   ` Jerin Jacob
2017-06-12  8:53     ` Bruce Richardson
2017-06-30 13:24     ` Bruce Richardson
2017-06-07 13:36 ` [dpdk-dev] [PATCH 4/5] test/test: add auto-tests for event ring functions Bruce Richardson
2017-06-07 13:36 ` [dpdk-dev] [PATCH 5/5] event/sw: change worker rings to standard event rings Bruce Richardson
2017-06-30 15:06 ` [dpdk-dev] [PATCH v2 0/5] create event rings type Bruce Richardson
2017-06-30 15:06   ` [dpdk-dev] [PATCH v2 1/5] ring: allow rings with non power-of-2 sizes Bruce Richardson
2017-07-03  8:46     ` Olivier Matz
2017-06-30 15:06   ` [dpdk-dev] [PATCH v2 2/5] test/test: add unit tests for exact size rings Bruce Richardson
2017-07-03  8:47     ` Olivier Matz
2017-06-30 15:06   ` [dpdk-dev] [PATCH v2 3/5] eventdev: add ring structure for events Bruce Richardson
2017-07-03  9:52     ` Van Haaren, Harry
2017-06-30 15:06   ` [dpdk-dev] [PATCH v2 4/5] test/test: add auto-tests for event ring functions Bruce Richardson
2017-07-03 12:30     ` Van Haaren, Harry
2017-06-30 15:06   ` Bruce Richardson [this message]
2017-07-03 12:28     ` [dpdk-dev] [PATCH v2 5/5] event/sw: change worker rings to standard event rings Van Haaren, Harry
2017-07-03 12:44       ` Jerin Jacob
2017-07-03 13:01         ` Van Haaren, Harry
2017-07-04  5:36           ` Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170630150621.156365-6-bruce.richardson@intel.com \
    --to=bruce.richardson@intel.com \
    --cc=dev@dpdk.org \
    --cc=jerin.jacob@caviumnetworks.com \
    --cc=olivier.matz@6wind.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).