DPDK patches and discussions
* [dpdk-dev] [PATCH 1/3] eventdev/eth_rx: add queue stats get and reset APIs
@ 2021-10-28  4:54 Naga Harish K S V
  2021-10-28  4:54 ` [dpdk-dev] [PATCH 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
                   ` (3 more replies)
  0 siblings, 4 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28  4:54 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

This patch adds a new API, ``rte_event_eth_rx_adapter_queue_stats_get``,
to retrieve queue stats. The queue stats are returned in
``struct rte_event_eth_rx_adapter_queue_stats``.

For resetting the queue stats, the
``rte_event_eth_rx_adapter_queue_stats_reset`` API is added.

The adapter stats_get and stats_reset APIs are also updated to
handle the queue-level event buffer use case.

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
---
 .../prog_guide/event_ethernet_rx_adapter.rst  |  11 +
 lib/eventdev/rte_event_eth_rx_adapter.c       | 252 +++++++++++++++---
 lib/eventdev/rte_event_eth_rx_adapter.h       |  66 +++++
 lib/eventdev/version.map                      |   2 +
 4 files changed, 288 insertions(+), 43 deletions(-)
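
For reference, a minimal caller-side sketch of the two new APIs (not part of
the patch; the adapter, port and queue ids are illustrative, and the adapter
is assumed to have been created with per-queue event buffers, e.g. via
rte_event_eth_rx_adapter_create_with_params(), since the APIs return -EINVAL
otherwise):

  #include <stdio.h>
  #include <inttypes.h>
  #include <rte_event_eth_rx_adapter.h>

  static void
  dump_and_clear_queue_stats(uint8_t id, uint16_t eth_dev_id,
                             uint16_t rx_queue_id)
  {
          struct rte_event_eth_rx_adapter_queue_stats q_stats;

          /* Per-queue counters plus the queue event buffer fill level */
          if (rte_event_eth_rx_adapter_queue_stats_get(id, eth_dev_id,
                                                       rx_queue_id,
                                                       &q_stats) == 0)
                  printf("q%u: packets=%" PRIu64 " dropped=%" PRIu64
                         " buf_count=%" PRIu64 "\n", rx_queue_id,
                         q_stats.rx_packets, q_stats.rx_dropped,
                         q_stats.rx_event_buf_count);

          /* Zero the per-queue counters */
          rte_event_eth_rx_adapter_queue_stats_reset(id, eth_dev_id,
                                                     rx_queue_id);
  }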

diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 8b58130fc5..67b11e1563 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -166,6 +166,17 @@ flags for handling received packets, event queue identifier, scheduler type,
 event priority, polling frequency of the receive queue and flow identifier
 in struct ``rte_event_eth_rx_adapter_queue_conf``.
 
+Getting and resetting Adapter queue stats
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_eth_rx_adapter_queue_stats_get()`` function reports
+adapter queue counters defined in struct ``rte_event_eth_rx_adapter_queue_stats``.
+This function reports queue level stats only when a queue level event buffer is
+used; otherwise it returns -EINVAL.
+
+The ``rte_event_eth_rx_adapter_queue_stats_reset`` function can be used to
+reset queue level stats when a queue level event buffer is in use.
+
 Interrupt Based Rx Queues
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index a175c61551..ec5b2ef7a6 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -245,6 +245,10 @@ struct eth_rx_queue_info {
 	uint64_t event;
 	struct eth_rx_vector_data vector_data;
 	struct eth_event_enqueue_buffer *event_buf;
+	/* use adapter stats struct for queue level stats,
+	 * as same stats need to be updated for adapter and queue
+	 */
+	struct rte_event_eth_rx_adapter_stats *stats;
 };
 
 static struct event_eth_rx_adapter **event_eth_rx_adapter;
@@ -268,14 +272,18 @@ rxa_validate_id(uint8_t id)
 
 static inline struct eth_event_enqueue_buffer *
 rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
-		  uint16_t rx_queue_id)
+		  uint16_t rx_queue_id,
+		  struct rte_event_eth_rx_adapter_stats **stats)
 {
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_device_info *dev_info =
 			&rx_adapter->eth_devices[eth_dev_id];
+		*stats = dev_info->rx_queue[rx_queue_id].stats;
 		return dev_info->rx_queue[rx_queue_id].event_buf;
-	} else
+	} else {
+		*stats = &rx_adapter->stats;
 		return &rx_adapter->event_enqueue_buffer;
+	}
 }
 
 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -766,9 +774,9 @@ rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
 /* Enqueue buffered events to event device */
 static inline uint16_t
 rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
-		       struct eth_event_enqueue_buffer *buf)
+		       struct eth_event_enqueue_buffer *buf,
+		       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
 
 	if (!count)
@@ -883,7 +891,8 @@ rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 static inline void
 rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
-		 struct eth_event_enqueue_buffer *buf)
+		 struct eth_event_enqueue_buffer *buf,
+		 struct rte_event_eth_rx_adapter_stats *stats)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
@@ -954,7 +963,7 @@ rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		else
 			num = nb_cb;
 		if (dropped)
-			rx_adapter->stats.rx_dropped += dropped;
+			stats->rx_dropped += dropped;
 	}
 
 	buf->count += num;
@@ -985,11 +994,10 @@ rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
 static inline uint32_t
 rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
-	   int *rxq_empty, struct eth_event_enqueue_buffer *buf)
+	   int *rxq_empty, struct eth_event_enqueue_buffer *buf,
+	   struct rte_event_eth_rx_adapter_stats *stats)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_event_eth_rx_adapter_stats *stats =
-					&rx_adapter->stats;
 	uint16_t n;
 	uint32_t nb_rx = 0;
 
@@ -1000,7 +1008,7 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	 */
 	while (rxa_pkt_buf_available(buf)) {
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 		stats->rx_poll_count++;
 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
@@ -1009,14 +1017,17 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 				*rxq_empty = 1;
 			break;
 		}
-		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
+		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
+				 stats);
 		nb_rx += n;
 		if (rx_count + nb_rx > max_rx)
 			break;
 	}
 
 	if (buf->count > 0)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
+
+	stats->rx_packets += nb_rx;
 
 	return nb_rx;
 }
@@ -1135,28 +1146,30 @@ rxa_intr_thread(void *arg)
 /* Dequeue <port, q> from interrupt ring and enqueue received
  * mbufs to eventdev
  */
-static inline uint32_t
+static inline void
 rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
 	struct eth_event_enqueue_buffer *buf;
+	struct rte_event_eth_rx_adapter_stats *stats;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
 	if (rx_adapter->num_rx_intr == 0)
-		return 0;
+		return;
 
 	if (rte_ring_count(rx_adapter->intr_ring) == 0
 		&& !rx_adapter->qd_valid)
-		return 0;
+		return;
 
 	buf = &rx_adapter->event_enqueue_buffer;
+	stats = &rx_adapter->stats;
 	ring_lock = &rx_adapter->intr_ring_lock;
 
 	if (buf->count >= BATCH_SIZE)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	while (rxa_pkt_buf_available(buf)) {
 		struct eth_device_info *dev_info;
@@ -1208,7 +1221,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 					continue;
 				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
 					rx_adapter->max_nb_rx,
-					&rxq_empty, buf);
+					&rxq_empty, buf, stats);
 				nb_rx += n;
 
 				enq_buffer_full = !rxq_empty && n == 0;
@@ -1229,7 +1242,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 		} else {
 			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
 				rx_adapter->max_nb_rx,
-				&rxq_empty, buf);
+				&rxq_empty, buf, stats);
 			rx_adapter->qd_valid = !rxq_empty;
 			nb_rx += n;
 			if (nb_rx > rx_adapter->max_nb_rx)
@@ -1239,7 +1252,6 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 
 done:
 	rx_adapter->stats.rx_intr_packets += nb_rx;
-	return nb_rx;
 }
 
 /*
@@ -1255,12 +1267,13 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
  * the hypervisor's switching layer where adjustments can be made to deal with
  * it.
  */
-static inline uint32_t
+static inline void
 rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1273,24 +1286,24 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
 		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
 
-		buf = rxa_event_buf_get(rx_adapter, d, qid);
+		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
 
 		/* Don't do a batch dequeue from the rx queue if there isn't
 		 * enough space in the enqueue buffer.
 		 */
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 		if (!rxa_pkt_buf_available(buf)) {
 			if (rx_adapter->use_queue_event_buf)
 				goto poll_next_entry;
 			else {
 				rx_adapter->wrr_pos = wrr_pos;
-				return nb_rx;
+				return;
 			}
 		}
 
 		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
-				NULL, buf);
+				NULL, buf, stats);
 		if (nb_rx > max_nb_rx) {
 			rx_adapter->wrr_pos =
 				    (wrr_pos + 1) % rx_adapter->wrr_len;
@@ -1301,7 +1314,6 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		if (++wrr_pos == rx_adapter->wrr_len)
 			wrr_pos = 0;
 	}
-	return nb_rx;
 }
 
 static void
@@ -1309,12 +1321,13 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
 	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	struct rte_event *ev;
 
-	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
+	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
 
 	if (buf->count)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	if (vec->vector_ev->nb_elem == 0)
 		return;
@@ -1333,7 +1346,6 @@ static int
 rxa_service_func(void *args)
 {
 	struct event_eth_rx_adapter *rx_adapter = args;
-	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
 		return 0;
@@ -1360,10 +1372,11 @@ rxa_service_func(void *args)
 		}
 	}
 
-	stats = &rx_adapter->stats;
-	stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
-	stats->rx_packets += rxa_poll(rx_adapter);
+	rxa_intr_ring_dequeue(rx_adapter);
+	rxa_poll(rx_adapter);
+
 	rte_spinlock_unlock(&rx_adapter->rx_lock);
+
 	return 0;
 }
 
@@ -1937,9 +1950,13 @@ rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_event_enqueue_buffer *event_buf =
 			dev_info->rx_queue[rx_queue_id].event_buf;
+		struct rte_event_eth_rx_adapter_stats *stats =
+			dev_info->rx_queue[rx_queue_id].stats;
 		rte_free(event_buf->events);
 		rte_free(event_buf);
+		rte_free(stats);
 		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
+		dev_info->rx_queue[rx_queue_id].stats = NULL;
 	}
 }
 
@@ -1955,6 +1972,7 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 	int sintrq;
 	struct rte_event *qi_ev;
 	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
 	int ret;
 
@@ -2061,6 +2079,21 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 
 	queue_info->event_buf = new_rx_buf;
 
+	/* Allocate storage for adapter queue stats */
+	stats = rte_zmalloc_socket("rx_queue_stats",
+				sizeof(*stats), 0,
+				rte_eth_dev_socket_id(eth_dev_id));
+	if (stats == NULL) {
+		rte_free(new_rx_buf->events);
+		rte_free(new_rx_buf);
+		RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
+				 " dev_id: %d queue_id: %d",
+				 eth_dev_id, rx_queue_id);
+		return -ENOMEM;
+	}
+
+	queue_info->stats = stats;
+
 	return 0;
 }
 
@@ -2819,6 +2852,15 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
 	return rxa_ctrl(id, 0);
 }
 
+static inline void
+rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
+{
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+
+	q_stats = queue_info->stats;
+	memset(q_stats, 0, sizeof(*q_stats));
+}
+
 int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
@@ -2829,7 +2871,9 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+	uint32_t i, j;
 	int ret;
 
 	if (rxa_memzone_lookup())
@@ -2843,8 +2887,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	memset(stats, 0, sizeof(*stats));
+
+	if (rx_adapter->service_inited)
+		*stats = rx_adapter->stats;
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+						j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				q_stats = queue_info->stats;
+
+				stats->rx_packets += q_stats->rx_packets;
+				stats->rx_poll_count += q_stats->rx_poll_count;
+				stats->rx_enq_count += q_stats->rx_enq_count;
+				stats->rx_enq_retry += q_stats->rx_enq_retry;
+				stats->rx_dropped += q_stats->rx_dropped;
+				stats->rx_enq_block_cycles +=
+						q_stats->rx_enq_block_cycles;
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
 			continue;
@@ -2857,21 +2925,63 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
 	}
 
-	if (rx_adapter->service_inited)
-		*stats = rx_adapter->stats;
-
+	buf = &rx_adapter->event_enqueue_buffer;
 	stats->rx_packets += dev_stats_sum.rx_packets;
 	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+	stats->rx_event_buf_count = buf->count;
+	stats->rx_event_buf_size = buf->events_size;
 
-	if (!rx_adapter->use_queue_event_buf) {
-		buf = &rx_adapter->event_enqueue_buffer;
-		stats->rx_event_buf_count = buf->count;
-		stats->rx_event_buf_size = buf->events_size;
-	} else {
-		stats->rx_event_buf_count = 0;
-		stats->rx_event_buf_size = 0;
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+	struct eth_event_enqueue_buffer *event_buf;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+
+	if (rx_adapter == NULL || stats == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
 	}
 
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	event_buf = queue_info->event_buf;
+	q_stats = queue_info->stats;
+
+	stats->rx_event_buf_count = event_buf->count;
+	stats->rx_event_buf_size = event_buf->events_size;
+	stats->rx_packets = q_stats->rx_packets;
+	stats->rx_poll_count = q_stats->rx_poll_count;
+	stats->rx_dropped = q_stats->rx_dropped;
+
 	return 0;
 }
 
@@ -2881,7 +2991,8 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	uint32_t i, j;
 
 	if (rxa_memzone_lookup())
 		return -ENOMEM;
@@ -2893,8 +3004,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 		return -EINVAL;
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+						j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				rxa_queue_stats_reset(queue_info);
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
 			continue;
@@ -2903,6 +3027,48 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	}
 
 	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+	if (rx_adapter == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
+	}
+
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	rxa_queue_stats_reset(queue_info);
+
 	return 0;
 }
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index ab625f7273..9546d792e9 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -35,6 +35,8 @@
  *  - rte_event_eth_rx_adapter_stats_get()
  *  - rte_event_eth_rx_adapter_stats_reset()
  *  - rte_event_eth_rx_adapter_queue_conf_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_reset()
  *
  * The application creates an ethernet to event adapter using
  * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
@@ -204,6 +206,23 @@ struct rte_event_eth_rx_adapter_queue_conf {
 	/**< event buffer size for this queue */
 };
 
+/**
+ * A structure used to retrieve statistics for an
+ * eth rx adapter queue.
+ */
+struct rte_event_eth_rx_adapter_queue_stats {
+	uint64_t rx_event_buf_count;
+	/**< Rx event buffered count */
+	uint64_t rx_event_buf_size;
+	/**< Rx event buffer size */
+	uint64_t rx_poll_count;
+	/**< Receive queue poll count */
+	uint64_t rx_packets;
+	/**< Received packet count */
+	uint64_t rx_dropped;
+	/**< Received packet dropped count */
+};
+
 /**
  * A structure used to retrieve statistics for an eth rx adapter instance.
  */
@@ -617,6 +636,53 @@ int rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
 			uint16_t rx_queue_id,
 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
 
+/**
+ * Retrieve Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @param[out] stats
+ *  Pointer to struct rte_event_eth_rx_adapter_queue_stats
+ *
+ * @return
+ *  - 0: Success, queue buffer stats retrieved.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats);
+
+/**
+ * Reset Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @return
+ *  - 0: Success, queue stats reset.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id);
 
 #ifdef __cplusplus
 }
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index cd37164141..ade1f1182e 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -103,6 +103,8 @@ EXPERIMENTAL {
 	# added in 21.11
 	rte_event_eth_rx_adapter_create_with_params;
 	rte_event_eth_rx_adapter_queue_conf_get;
+	rte_event_eth_rx_adapter_queue_stats_get;
+	rte_event_eth_rx_adapter_queue_stats_reset;
 };
 
 INTERNAL {
-- 
2.25.1



* [dpdk-dev] [PATCH 2/3] eventdev/eth_rx: support telemetry
  2021-10-28  4:54 [dpdk-dev] [PATCH 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
@ 2021-10-28  4:54 ` Naga Harish K S V
  2021-10-28  4:54 ` [dpdk-dev] [PATCH 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28  4:54 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Added telemetry support for rxa_queue_stats and
rxa_queue_stats_reset to get and reset Rx queue
stats, respectively.

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
---
 lib/eventdev/rte_event_eth_rx_adapter.c | 124 ++++++++++++++++++++++++
 1 file changed, 124 insertions(+)
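
For reference, the new endpoints can be exercised with the standard telemetry
client; the parameter order follows the help strings registered below, and the
ids here (adapter 0, port 0, queue 0) are illustrative:

  $ ./usertools/dpdk-telemetry.py
  --> /eventdev/rxa_queue_stats,0,0,0
  --> /eventdev/rxa_queue_stats_reset,0,0,0

The rxa_queue_stats reply is a dict carrying the three ids plus the
rx_event_buf_count, rx_event_buf_size, rx_poll_count, rx_packets and
rx_dropped counters added by this series.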

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index ec5b2ef7a6..fe71b0883e 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -3329,6 +3329,122 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
 	return 0;
 }
 
+static int
+handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+			   const char *params,
+			   struct rte_tel_data *d)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, igrnoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
+						    rx_queue_id, &q_stats)) {
+		RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
+		return -1;
+	}
+
+	rte_tel_data_start_dict(d);
+	rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+	rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
+	rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
+	RXA_ADD_DICT(q_stats, rx_event_buf_count);
+	RXA_ADD_DICT(q_stats, rx_event_buf_size);
+	RXA_ADD_DICT(q_stats, rx_poll_count);
+	RXA_ADD_DICT(q_stats, rx_packets);
+	RXA_ADD_DICT(q_stats, rx_dropped);
+
+	return 0;
+}
+
+static int
+handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+			     const char *params,
+			     struct rte_tel_data *d __rte_unused)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, igrnoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
+						       eth_dev_id,
+						       rx_queue_id)) {
+		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
+		return -1;
+	}
+
+	return 0;
+}
+
 RTE_INIT(rxa_init_telemetry)
 {
 	rte_telemetry_register_cmd("/eventdev/rxa_stats",
@@ -3342,4 +3458,12 @@ RTE_INIT(rxa_init_telemetry)
 	rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
 		handle_rxa_get_queue_conf,
 		"Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
+		handle_rxa_get_queue_stats,
+		"Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
+		handle_rxa_queue_stats_reset,
+		"Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
 }
-- 
2.25.1



* [dpdk-dev] [PATCH 3/3] test/event: add unit test for Rx adapter
  2021-10-28  4:54 [dpdk-dev] [PATCH 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
  2021-10-28  4:54 ` [dpdk-dev] [PATCH 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
@ 2021-10-28  4:54 ` Naga Harish K S V
  2021-10-28  6:55 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
  2021-10-28  7:06 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
  3 siblings, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28  4:54 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Add unit tests for the rte_event_eth_rx_adapter_queue_stats_get() and
rte_event_eth_rx_adapter_queue_stats_reset() APIs.

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
---
 app/test/test_event_eth_rx_adapter.c | 60 ++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)
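
For reference, the new case is picked up by the existing Rx adapter autotest;
a typical invocation looks like the following (binary path depends on the
build directory, and a software eventdev vdev may be needed on platforms
without a hardware eventdev PMD):

  $ echo "event_eth_rx_adapter_autotest" | \
        ./build/app/test/dpdk-test --vdev=event_sw0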

diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index 1419f6f64d..7cb91b152f 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -471,6 +471,64 @@ adapter_queue_event_buf_test(void)
 	return TEST_SUCCESS;
 }
 
+static int
+adapter_queue_stats_test(void)
+{
+	int err;
+	struct rte_event ev;
+	uint32_t cap;
+	struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+					 &cap);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	ev.queue_id = 0;
+	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+	ev.priority = 0;
+
+	queue_config.rx_queue_flags = 0;
+	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+		ev.flow_id = 1;
+		queue_config.rx_queue_flags =
+			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+	}
+	queue_config.ev = ev;
+	queue_config.servicing_weight = 1;
+	queue_config.event_buf_size = 1024;
+
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+					TEST_ETHDEV_ID, 0,
+					&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	return TEST_SUCCESS;
+}
+
 static void
 adapter_free(void)
 {
@@ -940,6 +998,8 @@ static struct unit_test_suite event_eth_rx_tests = {
 		TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf),
 		TEST_CASE_ST(adapter_create_with_params, adapter_free,
 			     adapter_queue_event_buf_test),
+		TEST_CASE_ST(adapter_create_with_params, adapter_free,
+			     adapter_queue_stats_test),
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
-- 
2.25.1



* [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs
  2021-10-28  4:54 [dpdk-dev] [PATCH 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
  2021-10-28  4:54 ` [dpdk-dev] [PATCH 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
  2021-10-28  4:54 ` [dpdk-dev] [PATCH 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
@ 2021-10-28  6:55 ` Naga Harish K S V
  2021-10-28  6:55   ` [dpdk-dev] [PATCH v2 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
  2021-10-28  6:55   ` [dpdk-dev] [PATCH v2 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
  2021-10-28  7:06 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
  3 siblings, 2 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28  6:55 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

This patch adds a new API, ``rte_event_eth_rx_adapter_queue_stats_get``,
to retrieve queue stats. The queue stats are returned in
``struct rte_event_eth_rx_adapter_queue_stats``.

For resetting the queue stats, the
``rte_event_eth_rx_adapter_queue_stats_reset`` API is added.

The adapter stats_get and stats_reset APIs are also updated to
handle the queue-level event buffer use case.

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
---
 .../prog_guide/event_ethernet_rx_adapter.rst  |  11 +
 lib/eventdev/eventdev_pmd.h                   |  52 ++++
 lib/eventdev/rte_event_eth_rx_adapter.c       | 268 +++++++++++++++---
 lib/eventdev/rte_event_eth_rx_adapter.h       |  66 +++++
 lib/eventdev/version.map                      |   2 +
 5 files changed, 356 insertions(+), 43 deletions(-)
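
Compared with v1, this version also exposes the per-queue stats through new
eventdev PMD callbacks. For reference, a hedged sketch of how a driver with an
internal event port could wire them up (the driver name and the counter
source are illustrative, not part of the patch):

  #include <string.h>
  #include <rte_common.h>
  #include <eventdev_pmd.h>

  static int
  xxx_rxa_queue_stats_get(const struct rte_eventdev *dev,
                          const struct rte_eth_dev *eth_dev,
                          uint16_t rx_queue_id,
                          struct rte_event_eth_rx_adapter_queue_stats *q_stats)
  {
          RTE_SET_USED(dev);
          RTE_SET_USED(eth_dev);
          RTE_SET_USED(rx_queue_id);
          /* A real driver would fill this from its HW/SW counters */
          memset(q_stats, 0, sizeof(*q_stats));
          return 0;
  }

  static int
  xxx_rxa_queue_stats_reset(const struct rte_eventdev *dev,
                            const struct rte_eth_dev *eth_dev,
                            uint16_t rx_queue_id)
  {
          RTE_SET_USED(dev);
          RTE_SET_USED(eth_dev);
          RTE_SET_USED(rx_queue_id);
          return 0;
  }

  static struct eventdev_ops xxx_evdev_ops = {
          /* ... existing ops elided ... */
          .eth_rx_adapter_queue_stats_get = xxx_rxa_queue_stats_get,
          .eth_rx_adapter_queue_stats_reset = xxx_rxa_queue_stats_reset,
  };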

diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 8b58130fc5..67b11e1563 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -166,6 +166,17 @@ flags for handling received packets, event queue identifier, scheduler type,
 event priority, polling frequency of the receive queue and flow identifier
 in struct ``rte_event_eth_rx_adapter_queue_conf``.
 
+Getting and resetting Adapter queue stats
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_eth_rx_adapter_queue_stats_get()`` function reports
+adapter queue counters defined in struct ``rte_event_eth_rx_adapter_queue_stats``.
+This function reports queue level stats only when a queue level event buffer is
+used; otherwise it returns -EINVAL.
+
+The ``rte_event_eth_rx_adapter_queue_stats_reset`` function can be used to
+reset queue level stats when a queue level event buffer is in use.
+
 Interrupt Based Rx Queues
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index d009e24309..3ba49d1fd4 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -749,6 +749,53 @@ typedef int (*eventdev_eth_rx_adapter_stats_get)
 typedef int (*eventdev_eth_rx_adapter_stats_reset)
 			(const struct rte_eventdev *dev,
 			const struct rte_eth_dev *eth_dev);
+
+struct rte_event_eth_rx_adapter_queue_stats;
+
+/**
+ * Retrieve ethernet Rx adapter queue statistics.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param eth_dev
+ *   Ethernet device pointer
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @param[out] q_stats
+ *   Pointer to queue stats structure
+ *
+ * @return
+ *   Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_q_stats_get)
+			(const struct rte_eventdev *dev,
+			 const struct rte_eth_dev *eth_dev,
+			 uint16_t rx_queue_id,
+			 struct rte_event_eth_rx_adapter_queue_stats *q_stats);
+
+/**
+ * Reset ethernet Rx adapter queue statistics.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param eth_dev
+ *   Ethernet device pointer
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @return
+ *   Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_q_stats_reset)
+			(const struct rte_eventdev *dev,
+			 const struct rte_eth_dev *eth_dev,
+			 uint16_t rx_queue_id);
+
 /**
  * Start eventdev selftest.
  *
@@ -1224,6 +1271,11 @@ struct eventdev_ops {
 	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
 	/**< Reset crypto stats */
 
+	eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
+	/**< Get ethernet Rx queue stats */
+	eventdev_eth_rx_adapter_q_stats_reset eth_rx_adapter_queue_stats_reset;
+	/**< Reset ethernet Rx queue stats */
+
 	eventdev_eth_tx_adapter_caps_get_t eth_tx_adapter_caps_get;
 	/**< Get ethernet Tx adapter capabilities */
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index a175c61551..31bbceb6c8 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -245,6 +245,10 @@ struct eth_rx_queue_info {
 	uint64_t event;
 	struct eth_rx_vector_data vector_data;
 	struct eth_event_enqueue_buffer *event_buf;
+	/* use adapter stats struct for queue level stats,
+	 * as same stats need to be updated for adapter and queue
+	 */
+	struct rte_event_eth_rx_adapter_stats *stats;
 };
 
 static struct event_eth_rx_adapter **event_eth_rx_adapter;
@@ -268,14 +272,18 @@ rxa_validate_id(uint8_t id)
 
 static inline struct eth_event_enqueue_buffer *
 rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
-		  uint16_t rx_queue_id)
+		  uint16_t rx_queue_id,
+		  struct rte_event_eth_rx_adapter_stats **stats)
 {
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_device_info *dev_info =
 			&rx_adapter->eth_devices[eth_dev_id];
+		*stats = dev_info->rx_queue[rx_queue_id].stats;
 		return dev_info->rx_queue[rx_queue_id].event_buf;
-	} else
+	} else {
+		*stats = &rx_adapter->stats;
 		return &rx_adapter->event_enqueue_buffer;
+	}
 }
 
 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -766,9 +774,9 @@ rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
 /* Enqueue buffered events to event device */
 static inline uint16_t
 rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
-		       struct eth_event_enqueue_buffer *buf)
+		       struct eth_event_enqueue_buffer *buf,
+		       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
 
 	if (!count)
@@ -883,7 +891,8 @@ rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 static inline void
 rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
-		 struct eth_event_enqueue_buffer *buf)
+		 struct eth_event_enqueue_buffer *buf,
+		 struct rte_event_eth_rx_adapter_stats *stats)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
@@ -954,7 +963,7 @@ rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		else
 			num = nb_cb;
 		if (dropped)
-			rx_adapter->stats.rx_dropped += dropped;
+			stats->rx_dropped += dropped;
 	}
 
 	buf->count += num;
@@ -985,11 +994,10 @@ rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
 static inline uint32_t
 rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
-	   int *rxq_empty, struct eth_event_enqueue_buffer *buf)
+	   int *rxq_empty, struct eth_event_enqueue_buffer *buf,
+	   struct rte_event_eth_rx_adapter_stats *stats)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_event_eth_rx_adapter_stats *stats =
-					&rx_adapter->stats;
 	uint16_t n;
 	uint32_t nb_rx = 0;
 
@@ -1000,7 +1008,7 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	 */
 	while (rxa_pkt_buf_available(buf)) {
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 		stats->rx_poll_count++;
 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
@@ -1009,14 +1017,17 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 				*rxq_empty = 1;
 			break;
 		}
-		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
+		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
+				 stats);
 		nb_rx += n;
 		if (rx_count + nb_rx > max_rx)
 			break;
 	}
 
 	if (buf->count > 0)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
+
+	stats->rx_packets += nb_rx;
 
 	return nb_rx;
 }
@@ -1135,28 +1146,30 @@ rxa_intr_thread(void *arg)
 /* Dequeue <port, q> from interrupt ring and enqueue received
  * mbufs to eventdev
  */
-static inline uint32_t
+static inline void
 rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
 	struct eth_event_enqueue_buffer *buf;
+	struct rte_event_eth_rx_adapter_stats *stats;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
 	if (rx_adapter->num_rx_intr == 0)
-		return 0;
+		return;
 
 	if (rte_ring_count(rx_adapter->intr_ring) == 0
 		&& !rx_adapter->qd_valid)
-		return 0;
+		return;
 
 	buf = &rx_adapter->event_enqueue_buffer;
+	stats = &rx_adapter->stats;
 	ring_lock = &rx_adapter->intr_ring_lock;
 
 	if (buf->count >= BATCH_SIZE)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	while (rxa_pkt_buf_available(buf)) {
 		struct eth_device_info *dev_info;
@@ -1208,7 +1221,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 					continue;
 				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
 					rx_adapter->max_nb_rx,
-					&rxq_empty, buf);
+					&rxq_empty, buf, stats);
 				nb_rx += n;
 
 				enq_buffer_full = !rxq_empty && n == 0;
@@ -1229,7 +1242,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 		} else {
 			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
 				rx_adapter->max_nb_rx,
-				&rxq_empty, buf);
+				&rxq_empty, buf, stats);
 			rx_adapter->qd_valid = !rxq_empty;
 			nb_rx += n;
 			if (nb_rx > rx_adapter->max_nb_rx)
@@ -1239,7 +1252,6 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 
 done:
 	rx_adapter->stats.rx_intr_packets += nb_rx;
-	return nb_rx;
 }
 
 /*
@@ -1255,12 +1267,13 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
  * the hypervisor's switching layer where adjustments can be made to deal with
  * it.
  */
-static inline uint32_t
+static inline void
 rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1273,24 +1286,24 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
 		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
 
-		buf = rxa_event_buf_get(rx_adapter, d, qid);
+		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
 
 		/* Don't do a batch dequeue from the rx queue if there isn't
 		 * enough space in the enqueue buffer.
 		 */
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 		if (!rxa_pkt_buf_available(buf)) {
 			if (rx_adapter->use_queue_event_buf)
 				goto poll_next_entry;
 			else {
 				rx_adapter->wrr_pos = wrr_pos;
-				return nb_rx;
+				return;
 			}
 		}
 
 		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
-				NULL, buf);
+				NULL, buf, stats);
 		if (nb_rx > max_nb_rx) {
 			rx_adapter->wrr_pos =
 				    (wrr_pos + 1) % rx_adapter->wrr_len;
@@ -1301,7 +1314,6 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		if (++wrr_pos == rx_adapter->wrr_len)
 			wrr_pos = 0;
 	}
-	return nb_rx;
 }
 
 static void
@@ -1309,12 +1321,13 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
 	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	struct rte_event *ev;
 
-	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
+	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
 
 	if (buf->count)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	if (vec->vector_ev->nb_elem == 0)
 		return;
@@ -1333,7 +1346,6 @@ static int
 rxa_service_func(void *args)
 {
 	struct event_eth_rx_adapter *rx_adapter = args;
-	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
 		return 0;
@@ -1360,10 +1372,11 @@ rxa_service_func(void *args)
 		}
 	}
 
-	stats = &rx_adapter->stats;
-	stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
-	stats->rx_packets += rxa_poll(rx_adapter);
+	rxa_intr_ring_dequeue(rx_adapter);
+	rxa_poll(rx_adapter);
+
 	rte_spinlock_unlock(&rx_adapter->rx_lock);
+
 	return 0;
 }
 
@@ -1937,9 +1950,13 @@ rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_event_enqueue_buffer *event_buf =
 			dev_info->rx_queue[rx_queue_id].event_buf;
+		struct rte_event_eth_rx_adapter_stats *stats =
+			dev_info->rx_queue[rx_queue_id].stats;
 		rte_free(event_buf->events);
 		rte_free(event_buf);
+		rte_free(stats);
 		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
+		dev_info->rx_queue[rx_queue_id].stats = NULL;
 	}
 }
 
@@ -1955,6 +1972,7 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 	int sintrq;
 	struct rte_event *qi_ev;
 	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
 	int ret;
 
@@ -2061,6 +2079,21 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 
 	queue_info->event_buf = new_rx_buf;
 
+	/* Allocate storage for adapter queue stats */
+	stats = rte_zmalloc_socket("rx_queue_stats",
+				sizeof(*stats), 0,
+				rte_eth_dev_socket_id(eth_dev_id));
+	if (stats == NULL) {
+		rte_free(new_rx_buf->events);
+		rte_free(new_rx_buf);
+		RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
+				 " dev_id: %d queue_id: %d",
+				 eth_dev_id, rx_queue_id);
+		return -ENOMEM;
+	}
+
+	queue_info->stats = stats;
+
 	return 0;
 }
 
@@ -2819,6 +2852,15 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
 	return rxa_ctrl(id, 0);
 }
 
+static inline void
+rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
+{
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+
+	q_stats = queue_info->stats;
+	memset(q_stats, 0, sizeof(*q_stats));
+}
+
 int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
@@ -2829,7 +2871,9 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+	uint32_t i, j;
 	int ret;
 
 	if (rxa_memzone_lookup())
@@ -2843,8 +2887,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	memset(stats, 0, sizeof(*stats));
+
+	if (rx_adapter->service_inited)
+		*stats = rx_adapter->stats;
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+						j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				q_stats = queue_info->stats;
+
+				stats->rx_packets += q_stats->rx_packets;
+				stats->rx_poll_count += q_stats->rx_poll_count;
+				stats->rx_enq_count += q_stats->rx_enq_count;
+				stats->rx_enq_retry += q_stats->rx_enq_retry;
+				stats->rx_dropped += q_stats->rx_dropped;
+				stats->rx_enq_block_cycles +=
+						q_stats->rx_enq_block_cycles;
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
 			continue;
@@ -2857,19 +2925,69 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
 	}
 
-	if (rx_adapter->service_inited)
-		*stats = rx_adapter->stats;
-
+	buf = &rx_adapter->event_enqueue_buffer;
 	stats->rx_packets += dev_stats_sum.rx_packets;
 	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+	stats->rx_event_buf_count = buf->count;
+	stats->rx_event_buf_size = buf->events_size;
 
-	if (!rx_adapter->use_queue_event_buf) {
-		buf = &rx_adapter->event_enqueue_buffer;
-		stats->rx_event_buf_count = buf->count;
-		stats->rx_event_buf_size = buf->events_size;
-	} else {
-		stats->rx_event_buf_count = 0;
-		stats->rx_event_buf_size = 0;
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+	struct eth_event_enqueue_buffer *event_buf;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+	struct rte_eventdev *dev;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+
+	if (rx_adapter == NULL || stats == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
+	}
+
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	event_buf = queue_info->event_buf;
+	q_stats = queue_info->stats;
+
+	stats->rx_event_buf_count = event_buf->count;
+	stats->rx_event_buf_size = event_buf->events_size;
+	stats->rx_packets = q_stats->rx_packets;
+	stats->rx_poll_count = q_stats->rx_poll_count;
+	stats->rx_dropped = q_stats->rx_dropped;
+
+	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+	if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
+		return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
+						&rte_eth_devices[eth_dev_id],
+						rx_queue_id, stats);
 	}
 
 	return 0;
@@ -2881,7 +2999,8 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	uint32_t i, j;
 
 	if (rxa_memzone_lookup())
 		return -ENOMEM;
@@ -2893,8 +3012,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 		return -EINVAL;
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+						j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				rxa_queue_stats_reset(queue_info);
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
 			continue;
@@ -2903,6 +3035,56 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	}
 
 	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+	struct rte_eventdev *dev;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+	if (rx_adapter == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
+	}
+
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	rxa_queue_stats_reset(queue_info);
+
+	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+	if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
+		return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
+						&rte_eth_devices[eth_dev_id],
+						rx_queue_id);
+	}
+
 	return 0;
 }
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index ab625f7273..9546d792e9 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -35,6 +35,8 @@
  *  - rte_event_eth_rx_adapter_stats_get()
  *  - rte_event_eth_rx_adapter_stats_reset()
  *  - rte_event_eth_rx_adapter_queue_conf_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_reset()
  *
  * The application creates an ethernet to event adapter using
  * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
@@ -204,6 +206,23 @@ struct rte_event_eth_rx_adapter_queue_conf {
 	/**< event buffer size for this queue */
 };
 
+/**
+ * A structure used to retrieve statistics for an
+ * eth rx adapter queue.
+ */
+struct rte_event_eth_rx_adapter_queue_stats {
+	uint64_t rx_event_buf_count;
+	/**< Rx event buffered count */
+	uint64_t rx_event_buf_size;
+	/**< Rx event buffer size */
+	uint64_t rx_poll_count;
+	/**< Receive queue poll count */
+	uint64_t rx_packets;
+	/**< Received packet count */
+	uint64_t rx_dropped;
+	/**< Received packet dropped count */
+};
+
 /**
  * A structure used to retrieve statistics for an eth rx adapter instance.
  */
@@ -617,6 +636,53 @@ int rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
 			uint16_t rx_queue_id,
 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
 
+/**
+ * Retrieve Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @param[out] stats
+ *  Pointer to struct rte_event_eth_rx_adapter_queue_stats
+ *
+ * @return
+ *  - 0: Success, queue buffer stats retrieved.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats);
+
+/**
+ * Reset Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @return
+ *  - 0: Success, queue stats reset.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id);
 
 #ifdef __cplusplus
 }
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index cd37164141..ade1f1182e 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -103,6 +103,8 @@ EXPERIMENTAL {
 	# added in 21.11
 	rte_event_eth_rx_adapter_create_with_params;
 	rte_event_eth_rx_adapter_queue_conf_get;
+	rte_event_eth_rx_adapter_queue_stats_get;
+	rte_event_eth_rx_adapter_queue_stats_reset;
 };
 
 INTERNAL {
-- 
2.25.1



* [dpdk-dev] [PATCH v2 2/3] eventdev/eth_rx: support telemetry
  2021-10-28  6:55 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
@ 2021-10-28  6:55   ` Naga Harish K S V
  2021-10-28  6:55   ` [dpdk-dev] [PATCH v2 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
  1 sibling, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28  6:55 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Added telemetry support for rxa_queue_stats and
rxa_queue_stats_reset to get and reset Rx queue
stats, respectively.

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
---
 lib/eventdev/rte_event_eth_rx_adapter.c | 124 ++++++++++++++++++++++++
 1 file changed, 124 insertions(+)
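
For reference, an illustrative reply to /eventdev/rxa_queue_stats,0,0,0 (the
keys mirror the RXA_ADD_DICT() entries below; the counter values are made up):

  {"/eventdev/rxa_queue_stats": {"rx_adapter_id": 0, "eth_dev_id": 0,
    "rx_queue_id": 0, "rx_event_buf_count": 0, "rx_event_buf_size": 1024,
    "rx_poll_count": 4321, "rx_packets": 4096, "rx_dropped": 0}}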

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 31bbceb6c8..8cfc10f0c2 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -3345,6 +3345,122 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
 	return 0;
 }
 
+static int
+handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+			   const char *params,
+			   struct rte_tel_data *d)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, igrnoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
+						    rx_queue_id, &q_stats)) {
+		RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
+		return -1;
+	}
+
+	rte_tel_data_start_dict(d);
+	rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+	rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
+	rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
+	RXA_ADD_DICT(q_stats, rx_event_buf_count);
+	RXA_ADD_DICT(q_stats, rx_event_buf_size);
+	RXA_ADD_DICT(q_stats, rx_poll_count);
+	RXA_ADD_DICT(q_stats, rx_packets);
+	RXA_ADD_DICT(q_stats, rx_dropped);
+
+	return 0;
+}
+
+static int
+handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+			     const char *params,
+			     struct rte_tel_data *d __rte_unused)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, ignoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
+						       eth_dev_id,
+						       rx_queue_id)) {
+		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
+		return -1;
+	}
+
+	return 0;
+}
+
 RTE_INIT(rxa_init_telemetry)
 {
 	rte_telemetry_register_cmd("/eventdev/rxa_stats",
@@ -3358,4 +3474,12 @@ RTE_INIT(rxa_init_telemetry)
 	rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
 		handle_rxa_get_queue_conf,
 		"Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
+		handle_rxa_get_queue_stats,
+		"Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
+		handle_rxa_queue_stats_reset,
+		"Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
 }
-- 
2.25.1



* [dpdk-dev] [PATCH v2 3/3] test/event: add unit test for Rx adapter
  2021-10-28  6:55 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
  2021-10-28  6:55   ` [dpdk-dev] [PATCH v2 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
@ 2021-10-28  6:55   ` Naga Harish K S V
  1 sibling, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28  6:55 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Add a unit test for the rte_event_eth_rx_adapter_queue_stats_get() and
rte_event_eth_rx_adapter_queue_stats_reset() APIs.
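
The new case plugs into the existing Rx adapter test suite, so it can be
run from dpdk-test via the event_eth_rx_adapter_autotest command
(assuming that is still the registered test name).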

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
---
 app/test/test_event_eth_rx_adapter.c | 60 ++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index 1419f6f64d..7cb91b152f 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -471,6 +471,64 @@ adapter_queue_event_buf_test(void)
 	return TEST_SUCCESS;
 }
 
+static int
+adapter_queue_stats_test(void)
+{
+	int err;
+	struct rte_event ev;
+	uint32_t cap;
+	struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+					 &cap);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	ev.queue_id = 0;
+	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+	ev.priority = 0;
+
+	queue_config.rx_queue_flags = 0;
+	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+		ev.flow_id = 1;
+		queue_config.rx_queue_flags =
+			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+	}
+	queue_config.ev = ev;
+	queue_config.servicing_weight = 1;
+	queue_config.event_buf_size = 1024;
+
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+					TEST_ETHDEV_ID, 0,
+					&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	return TEST_SUCCESS;
+}
+
 static void
 adapter_free(void)
 {
@@ -940,6 +998,8 @@ static struct unit_test_suite event_eth_rx_tests = {
 		TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf),
 		TEST_CASE_ST(adapter_create_with_params, adapter_free,
 			     adapter_queue_event_buf_test),
+		TEST_CASE_ST(adapter_create_with_params, adapter_free,
+			     adapter_queue_stats_test),
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
-- 
2.25.1



* [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs
  2021-10-28  4:54 [dpdk-dev] [PATCH 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
                   ` (2 preceding siblings ...)
  2021-10-28  6:55 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
@ 2021-10-28  7:06 ` Naga Harish K S V
  2021-10-28  7:06   ` [dpdk-dev] [PATCH v2 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
                     ` (4 more replies)
  3 siblings, 5 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28  7:06 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

This patch adds a new API, ``rte_event_eth_rx_adapter_queue_stats_get``,
to retrieve queue stats. The queue stats are returned in a
``struct rte_event_eth_rx_adapter_queue_stats``.

For resetting the queue stats, the
``rte_event_eth_rx_adapter_queue_stats_reset`` API is added.

The adapter stats_get and stats_reset APIs are also updated to
handle the queue level event buffer use case.
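
As an illustration (not part of this patch), an application whose
adapter uses queue level event buffers could read and clear the new
per-queue counters roughly as below; the helper name is arbitrary and
error handling is minimal:

#include <stdio.h>
#include <inttypes.h>
#include <rte_event_eth_rx_adapter.h>

/* Dump and reset the per-queue counters for adapter rxa_id,
 * ethdev port_id, Rx queue qid. queue_stats_get() returns
 * -EINVAL unless a queue level event buffer is in use.
 */
static int
rxa_queue_stats_dump(uint8_t rxa_id, uint16_t port_id, uint16_t qid)
{
	struct rte_event_eth_rx_adapter_queue_stats qs;
	int ret;

	ret = rte_event_eth_rx_adapter_queue_stats_get(rxa_id, port_id,
						       qid, &qs);
	if (ret != 0)
		return ret;

	printf("port %u queue %u: packets=%" PRIu64 " dropped=%" PRIu64
	       " polls=%" PRIu64 " buffered=%" PRIu64 "/%" PRIu64 "\n",
	       port_id, qid, qs.rx_packets, qs.rx_dropped,
	       qs.rx_poll_count, qs.rx_event_buf_count,
	       qs.rx_event_buf_size);

	return rte_event_eth_rx_adapter_queue_stats_reset(rxa_id, port_id,
							  qid);
}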

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
---
v2:
* added PMD callback support for the adapter queue_stats_get and
  queue_stats_reset APIs.
---
 .../prog_guide/event_ethernet_rx_adapter.rst  |  11 +
 lib/eventdev/eventdev_pmd.h                   |  52 ++++
 lib/eventdev/rte_event_eth_rx_adapter.c       | 268 +++++++++++++++---
 lib/eventdev/rte_event_eth_rx_adapter.h       |  66 +++++
 lib/eventdev/version.map                      |   2 +
 5 files changed, 356 insertions(+), 43 deletions(-)

diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 8b58130fc5..67b11e1563 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -166,6 +166,17 @@ flags for handling received packets, event queue identifier, scheduler type,
 event priority, polling frequency of the receive queue and flow identifier
 in struct ``rte_event_eth_rx_adapter_queue_conf``.
 
+Getting and resetting Adapter queue stats
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_eth_rx_adapter_queue_stats_get()`` function reports
+adapter queue counters defined in struct ``rte_event_eth_rx_adapter_queue_stats``.
+This function reports queue level stats only when queue level event buffer is
+used otherwise it returns -EINVAL.
+
+The ``rte_event_eth_rx_adapter_queue_stats_reset`` function can be used to
+reset queue level stats when queue level event buffer is in use.
+
 Interrupt Based Rx Queues
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index d009e24309..3ba49d1fd4 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -749,6 +749,53 @@ typedef int (*eventdev_eth_rx_adapter_stats_get)
 typedef int (*eventdev_eth_rx_adapter_stats_reset)
 			(const struct rte_eventdev *dev,
 			const struct rte_eth_dev *eth_dev);
+
+struct rte_event_eth_rx_adapter_queue_stats;
+
+/**
+ * Retrieve ethernet Rx adapter queue statistics.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param eth_dev
+ *   Ethernet device pointer
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @param[out] q_stats
+ *   Pointer to queue stats structure
+ *
+ * @return
+ *   Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_q_stats_get)
+			(const struct rte_eventdev *dev,
+			 const struct rte_eth_dev *eth_dev,
+			 uint16_t rx_queue_id,
+			 struct rte_event_eth_rx_adapter_queue_stats *q_stats);
+
+/**
+ * Reset ethernet Rx adapter queue statistics.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param eth_dev
+ *   Ethernet device pointer
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @return
+ *   Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_q_stats_reset)
+			(const struct rte_eventdev *dev,
+			 const struct rte_eth_dev *eth_dev,
+			 uint16_t rx_queue_id);
+
 /**
  * Start eventdev selftest.
  *
@@ -1224,6 +1271,11 @@ struct eventdev_ops {
 	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
 	/**< Reset crypto stats */
 
+	eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
+	/**< Get ethernet Rx queue stats */
+	eventdev_eth_rx_adapter_q_stats_reset eth_rx_adapter_queue_stats_reset;
+	/**< Reset ethernet Rx queue stats */
+
 	eventdev_eth_tx_adapter_caps_get_t eth_tx_adapter_caps_get;
 	/**< Get ethernet Tx adapter capabilities */
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index a175c61551..31bbceb6c8 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -245,6 +245,10 @@ struct eth_rx_queue_info {
 	uint64_t event;
 	struct eth_rx_vector_data vector_data;
 	struct eth_event_enqueue_buffer *event_buf;
+	/* use adapter stats struct for queue level stats,
+	 * as same stats need to be updated for adapter and queue
+	 */
+	struct rte_event_eth_rx_adapter_stats *stats;
 };
 
 static struct event_eth_rx_adapter **event_eth_rx_adapter;
@@ -268,14 +272,18 @@ rxa_validate_id(uint8_t id)
 
 static inline struct eth_event_enqueue_buffer *
 rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
-		  uint16_t rx_queue_id)
+		  uint16_t rx_queue_id,
+		  struct rte_event_eth_rx_adapter_stats **stats)
 {
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_device_info *dev_info =
 			&rx_adapter->eth_devices[eth_dev_id];
+		*stats = dev_info->rx_queue[rx_queue_id].stats;
 		return dev_info->rx_queue[rx_queue_id].event_buf;
-	} else
+	} else {
+		*stats = &rx_adapter->stats;
 		return &rx_adapter->event_enqueue_buffer;
+	}
 }
 
 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -766,9 +774,9 @@ rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
 /* Enqueue buffered events to event device */
 static inline uint16_t
 rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
-		       struct eth_event_enqueue_buffer *buf)
+		       struct eth_event_enqueue_buffer *buf,
+		       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
 
 	if (!count)
@@ -883,7 +891,8 @@ rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 static inline void
 rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
-		 struct eth_event_enqueue_buffer *buf)
+		 struct eth_event_enqueue_buffer *buf,
+		 struct rte_event_eth_rx_adapter_stats *stats)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
@@ -954,7 +963,7 @@ rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		else
 			num = nb_cb;
 		if (dropped)
-			rx_adapter->stats.rx_dropped += dropped;
+			stats->rx_dropped += dropped;
 	}
 
 	buf->count += num;
@@ -985,11 +994,10 @@ rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
 static inline uint32_t
 rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
-	   int *rxq_empty, struct eth_event_enqueue_buffer *buf)
+	   int *rxq_empty, struct eth_event_enqueue_buffer *buf,
+	   struct rte_event_eth_rx_adapter_stats *stats)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_event_eth_rx_adapter_stats *stats =
-					&rx_adapter->stats;
 	uint16_t n;
 	uint32_t nb_rx = 0;
 
@@ -1000,7 +1008,7 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	 */
 	while (rxa_pkt_buf_available(buf)) {
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 		stats->rx_poll_count++;
 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
@@ -1009,14 +1017,17 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 				*rxq_empty = 1;
 			break;
 		}
-		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
+		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
+				 stats);
 		nb_rx += n;
 		if (rx_count + nb_rx > max_rx)
 			break;
 	}
 
 	if (buf->count > 0)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
+
+	stats->rx_packets += nb_rx;
 
 	return nb_rx;
 }
@@ -1135,28 +1146,30 @@ rxa_intr_thread(void *arg)
 /* Dequeue <port, q> from interrupt ring and enqueue received
  * mbufs to eventdev
  */
-static inline uint32_t
+static inline void
 rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
 	struct eth_event_enqueue_buffer *buf;
+	struct rte_event_eth_rx_adapter_stats *stats;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
 	if (rx_adapter->num_rx_intr == 0)
-		return 0;
+		return;
 
 	if (rte_ring_count(rx_adapter->intr_ring) == 0
 		&& !rx_adapter->qd_valid)
-		return 0;
+		return;
 
 	buf = &rx_adapter->event_enqueue_buffer;
+	stats = &rx_adapter->stats;
 	ring_lock = &rx_adapter->intr_ring_lock;
 
 	if (buf->count >= BATCH_SIZE)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	while (rxa_pkt_buf_available(buf)) {
 		struct eth_device_info *dev_info;
@@ -1208,7 +1221,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 					continue;
 				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
 					rx_adapter->max_nb_rx,
-					&rxq_empty, buf);
+					&rxq_empty, buf, stats);
 				nb_rx += n;
 
 				enq_buffer_full = !rxq_empty && n == 0;
@@ -1229,7 +1242,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 		} else {
 			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
 				rx_adapter->max_nb_rx,
-				&rxq_empty, buf);
+				&rxq_empty, buf, stats);
 			rx_adapter->qd_valid = !rxq_empty;
 			nb_rx += n;
 			if (nb_rx > rx_adapter->max_nb_rx)
@@ -1239,7 +1252,6 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 
 done:
 	rx_adapter->stats.rx_intr_packets += nb_rx;
-	return nb_rx;
 }
 
 /*
@@ -1255,12 +1267,13 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
  * the hypervisor's switching layer where adjustments can be made to deal with
  * it.
  */
-static inline uint32_t
+static inline void
 rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1273,24 +1286,24 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
 		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
 
-		buf = rxa_event_buf_get(rx_adapter, d, qid);
+		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
 
 		/* Don't do a batch dequeue from the rx queue if there isn't
 		 * enough space in the enqueue buffer.
 		 */
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 		if (!rxa_pkt_buf_available(buf)) {
 			if (rx_adapter->use_queue_event_buf)
 				goto poll_next_entry;
 			else {
 				rx_adapter->wrr_pos = wrr_pos;
-				return nb_rx;
+				return;
 			}
 		}
 
 		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
-				NULL, buf);
+				NULL, buf, stats);
 		if (nb_rx > max_nb_rx) {
 			rx_adapter->wrr_pos =
 				    (wrr_pos + 1) % rx_adapter->wrr_len;
@@ -1301,7 +1314,6 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		if (++wrr_pos == rx_adapter->wrr_len)
 			wrr_pos = 0;
 	}
-	return nb_rx;
 }
 
 static void
@@ -1309,12 +1321,13 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
 	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	struct rte_event *ev;
 
-	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
+	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
 
 	if (buf->count)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	if (vec->vector_ev->nb_elem == 0)
 		return;
@@ -1333,7 +1346,6 @@ static int
 rxa_service_func(void *args)
 {
 	struct event_eth_rx_adapter *rx_adapter = args;
-	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
 		return 0;
@@ -1360,10 +1372,11 @@ rxa_service_func(void *args)
 		}
 	}
 
-	stats = &rx_adapter->stats;
-	stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
-	stats->rx_packets += rxa_poll(rx_adapter);
+	rxa_intr_ring_dequeue(rx_adapter);
+	rxa_poll(rx_adapter);
+
 	rte_spinlock_unlock(&rx_adapter->rx_lock);
+
 	return 0;
 }
 
@@ -1937,9 +1950,13 @@ rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_event_enqueue_buffer *event_buf =
 			dev_info->rx_queue[rx_queue_id].event_buf;
+		struct rte_event_eth_rx_adapter_stats *stats =
+			dev_info->rx_queue[rx_queue_id].stats;
 		rte_free(event_buf->events);
 		rte_free(event_buf);
+		rte_free(stats);
 		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
+		dev_info->rx_queue[rx_queue_id].stats = NULL;
 	}
 }
 
@@ -1955,6 +1972,7 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 	int sintrq;
 	struct rte_event *qi_ev;
 	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
 	int ret;
 
@@ -2061,6 +2079,21 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 
 	queue_info->event_buf = new_rx_buf;
 
+	/* Allocate storage for adapter queue stats */
+	stats = rte_zmalloc_socket("rx_queue_stats",
+				sizeof(*stats), 0,
+				rte_eth_dev_socket_id(eth_dev_id));
+	if (stats == NULL) {
+		rte_free(new_rx_buf->events);
+		rte_free(new_rx_buf);
+		RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
+				 " dev_id: %d queue_id: %d",
+				 eth_dev_id, rx_queue_id);
+		return -ENOMEM;
+	}
+
+	queue_info->stats = stats;
+
 	return 0;
 }
 
@@ -2819,6 +2852,15 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
 	return rxa_ctrl(id, 0);
 }
 
+static inline void
+rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
+{
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+
+	q_stats = queue_info->stats;
+	memset(q_stats, 0, sizeof(*q_stats));
+}
+
 int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
@@ -2829,7 +2871,9 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+	uint32_t i, j;
 	int ret;
 
 	if (rxa_memzone_lookup())
@@ -2843,8 +2887,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	memset(stats, 0, sizeof(*stats));
+
+	if (rx_adapter->service_inited)
+		*stats = rx_adapter->stats;
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+						j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				q_stats = queue_info->stats;
+
+				stats->rx_packets += q_stats->rx_packets;
+				stats->rx_poll_count += q_stats->rx_poll_count;
+				stats->rx_enq_count += q_stats->rx_enq_count;
+				stats->rx_enq_retry += q_stats->rx_enq_retry;
+				stats->rx_dropped += q_stats->rx_dropped;
+				stats->rx_enq_block_cycles +=
+						q_stats->rx_enq_block_cycles;
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
 			continue;
@@ -2857,19 +2925,69 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
 	}
 
-	if (rx_adapter->service_inited)
-		*stats = rx_adapter->stats;
-
+	buf = &rx_adapter->event_enqueue_buffer;
 	stats->rx_packets += dev_stats_sum.rx_packets;
 	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+	stats->rx_event_buf_count = buf->count;
+	stats->rx_event_buf_size = buf->events_size;
 
-	if (!rx_adapter->use_queue_event_buf) {
-		buf = &rx_adapter->event_enqueue_buffer;
-		stats->rx_event_buf_count = buf->count;
-		stats->rx_event_buf_size = buf->events_size;
-	} else {
-		stats->rx_event_buf_count = 0;
-		stats->rx_event_buf_size = 0;
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+	struct eth_event_enqueue_buffer *event_buf;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+	struct rte_eventdev *dev;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+
+	if (rx_adapter == NULL || stats == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
+	}
+
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	event_buf = queue_info->event_buf;
+	q_stats = queue_info->stats;
+
+	stats->rx_event_buf_count = event_buf->count;
+	stats->rx_event_buf_size = event_buf->events_size;
+	stats->rx_packets = q_stats->rx_packets;
+	stats->rx_poll_count = q_stats->rx_poll_count;
+	stats->rx_dropped = q_stats->rx_dropped;
+
+	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+	if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
+		return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
+						&rte_eth_devices[eth_dev_id],
+						rx_queue_id, stats);
 	}
 
 	return 0;
@@ -2881,7 +2999,8 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	uint32_t i, j;
 
 	if (rxa_memzone_lookup())
 		return -ENOMEM;
@@ -2893,8 +3012,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 		return -EINVAL;
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+						j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				rxa_queue_stats_reset(queue_info);
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
 			continue;
@@ -2903,6 +3035,56 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	}
 
 	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+	struct rte_eventdev *dev;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+	if (rx_adapter == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
+	}
+
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	rxa_queue_stats_reset(queue_info);
+
+	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+	if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
+		return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
+						&rte_eth_devices[eth_dev_id],
+						rx_queue_id);
+	}
+
 	return 0;
 }
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index ab625f7273..9546d792e9 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -35,6 +35,8 @@
  *  - rte_event_eth_rx_adapter_stats_get()
  *  - rte_event_eth_rx_adapter_stats_reset()
  *  - rte_event_eth_rx_adapter_queue_conf_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_reset()
  *
  * The application creates an ethernet to event adapter using
  * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
@@ -204,6 +206,23 @@ struct rte_event_eth_rx_adapter_queue_conf {
 	/**< event buffer size for this queue */
 };
 
+/**
+ * A structure used to retrieve statistics for an
+ * eth rx adapter queue.
+ */
+struct rte_event_eth_rx_adapter_queue_stats {
+	uint64_t rx_event_buf_count;
+	/**< Rx event buffered count */
+	uint64_t rx_event_buf_size;
+	/**< Rx event buffer size */
+	uint64_t rx_poll_count;
+	/**< Receive queue poll count */
+	uint64_t rx_packets;
+	/**< Received packet count */
+	uint64_t rx_dropped;
+	/**< Received packet dropped count */
+};
+
 /**
  * A structure used to retrieve statistics for an eth rx adapter instance.
  */
@@ -617,6 +636,53 @@ int rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
 			uint16_t rx_queue_id,
 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
 
+/**
+ * Retrieve Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @param[out] stats
+ *  Pointer to struct rte_event_eth_rx_adapter_queue_stats
+ *
+ * @return
+ *  - 0: Success, queue buffer stats retrieved.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats);
+
+/**
+ * Reset Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @return
+ *  - 0: Success, queue stats reset.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id);
 
 #ifdef __cplusplus
 }
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index cd37164141..ade1f1182e 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -103,6 +103,8 @@ EXPERIMENTAL {
 	# added in 21.11
 	rte_event_eth_rx_adapter_create_with_params;
 	rte_event_eth_rx_adapter_queue_conf_get;
+	rte_event_eth_rx_adapter_queue_stats_get;
+	rte_event_eth_rx_adapter_queue_stats_reset;
 };
 
 INTERNAL {
-- 
2.25.1



* [dpdk-dev] [PATCH v2 2/3] eventdev/eth_rx: support telemetry
  2021-10-28  7:06 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
@ 2021-10-28  7:06   ` Naga Harish K S V
  2021-10-28  7:06   ` [dpdk-dev] [PATCH v2 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28  7:06 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Added telemetry support for the rxa_queue_stats and
rxa_queue_stats_reset commands to get and reset Rx queue
stats, respectively.

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
---
 lib/eventdev/rte_event_eth_rx_adapter.c | 124 ++++++++++++++++++++++++
 1 file changed, 124 insertions(+)

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 31bbceb6c8..8cfc10f0c2 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -3345,6 +3345,122 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
 	return 0;
 }
 
+static int
+handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+			   const char *params,
+			   struct rte_tel_data *d)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, ignoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
+						    rx_queue_id, &q_stats)) {
+		RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
+		return -1;
+	}
+
+	rte_tel_data_start_dict(d);
+	rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+	rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
+	rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
+	RXA_ADD_DICT(q_stats, rx_event_buf_count);
+	RXA_ADD_DICT(q_stats, rx_event_buf_size);
+	RXA_ADD_DICT(q_stats, rx_poll_count);
+	RXA_ADD_DICT(q_stats, rx_packets);
+	RXA_ADD_DICT(q_stats, rx_dropped);
+
+	return 0;
+}
+
+static int
+handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+			     const char *params,
+			     struct rte_tel_data *d __rte_unused)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, ignoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
+						       eth_dev_id,
+						       rx_queue_id)) {
+		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
+		return -1;
+	}
+
+	return 0;
+}
+
 RTE_INIT(rxa_init_telemetry)
 {
 	rte_telemetry_register_cmd("/eventdev/rxa_stats",
@@ -3358,4 +3474,12 @@ RTE_INIT(rxa_init_telemetry)
 	rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
 		handle_rxa_get_queue_conf,
 		"Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
+		handle_rxa_get_queue_stats,
+		"Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
+		handle_rxa_queue_stats_reset,
+		"Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
 }
-- 
2.25.1



* [dpdk-dev] [PATCH v2 3/3] test/event: add unit test for Rx adapter
  2021-10-28  7:06 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
  2021-10-28  7:06   ` [dpdk-dev] [PATCH v2 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
@ 2021-10-28  7:06   ` Naga Harish K S V
  2021-10-28  8:10   ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Jayatheerthan, Jay
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28  7:06 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Add a unit test for the rte_event_eth_rx_adapter_queue_stats_get() and
rte_event_eth_rx_adapter_queue_stats_reset() APIs.

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
---
 app/test/test_event_eth_rx_adapter.c | 60 ++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index 1419f6f64d..7cb91b152f 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -471,6 +471,64 @@ adapter_queue_event_buf_test(void)
 	return TEST_SUCCESS;
 }
 
+static int
+adapter_queue_stats_test(void)
+{
+	int err;
+	struct rte_event ev;
+	uint32_t cap;
+	struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+					 &cap);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	ev.queue_id = 0;
+	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+	ev.priority = 0;
+
+	queue_config.rx_queue_flags = 0;
+	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+		ev.flow_id = 1;
+		queue_config.rx_queue_flags =
+			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+	}
+	queue_config.ev = ev;
+	queue_config.servicing_weight = 1;
+	queue_config.event_buf_size = 1024;
+
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+					TEST_ETHDEV_ID, 0,
+					&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	return TEST_SUCCESS;
+}
+
 static void
 adapter_free(void)
 {
@@ -940,6 +998,8 @@ static struct unit_test_suite event_eth_rx_tests = {
 		TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf),
 		TEST_CASE_ST(adapter_create_with_params, adapter_free,
 			     adapter_queue_event_buf_test),
+		TEST_CASE_ST(adapter_create_with_params, adapter_free,
+			     adapter_queue_stats_test),
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
-- 
2.25.1



* Re: [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs
  2021-10-28  7:06 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
  2021-10-28  7:06   ` [dpdk-dev] [PATCH v2 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
  2021-10-28  7:06   ` [dpdk-dev] [PATCH v2 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
@ 2021-10-28  8:10   ` Jayatheerthan, Jay
  2021-10-28 10:27   ` [dpdk-dev] [PATCH v3 " Naga Harish K S V
  2021-10-28 10:37   ` Naga Harish K S V
  4 siblings, 0 replies; 17+ messages in thread
From: Jayatheerthan, Jay @ 2021-10-28  8:10 UTC (permalink / raw)
  To: Naga Harish K, S V, jerinj; +Cc: dev

> -----Original Message-----
> From: Naga Harish K, S V <s.v.naga.harish.k@intel.com>
> Sent: Thursday, October 28, 2021 12:37 PM
> To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Cc: dev@dpdk.org
> Subject: [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs
> 
> This patch adds new api ``rte_event_eth_rx_adapter_queue_stats_get`` to
> retrieve queue stats. The queue stats are in the format
> ``struct rte_event_eth_rx_adapter_queue_stats``.
> 
> For resetting the queue stats,
> ``rte_event_eth_rx_adapter_queue_stats_reset`` api is added.
> 
> The adapter stats_get and stats_reset apis are also updated to
> handle queue level event buffer use case.
> 
> Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
> ---
> v2:
> * added pmd callback support for adapter queue_stats_get and
>   queue_stats_reset apis.
> ---
>  .../prog_guide/event_ethernet_rx_adapter.rst  |  11 +
>  lib/eventdev/eventdev_pmd.h                   |  52 ++++
>  lib/eventdev/rte_event_eth_rx_adapter.c       | 268 +++++++++++++++---
>  lib/eventdev/rte_event_eth_rx_adapter.h       |  66 +++++
>  lib/eventdev/version.map                      |   2 +
>  5 files changed, 356 insertions(+), 43 deletions(-)
> 
> diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
> index 8b58130fc5..67b11e1563 100644
> --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
> +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
> @@ -166,6 +166,17 @@ flags for handling received packets, event queue identifier, scheduler type,
>  event priority, polling frequency of the receive queue and flow identifier
>  in struct ``rte_event_eth_rx_adapter_queue_conf``.
> 
> +Getting and resetting Adapter queue stats
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +The ``rte_event_eth_rx_adapter_queue_stats_get()`` function reports
> +adapter queue counters defined in struct ``rte_event_eth_rx_adapter_queue_stats``.
> +This function reports queue level stats only when queue level event buffer is
> +used otherwise it returns -EINVAL.
> +
> +The ``rte_event_eth_rx_adapter_queue_stats_reset`` function can be used to
> +reset queue level stats when queue level event buffer is in use.
> +
>  Interrupt Based Rx Queues
>  ~~~~~~~~~~~~~~~~~~~~~~~~~~
> 
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> index d009e24309..3ba49d1fd4 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -749,6 +749,53 @@ typedef int (*eventdev_eth_rx_adapter_stats_get)
>  typedef int (*eventdev_eth_rx_adapter_stats_reset)
>  			(const struct rte_eventdev *dev,
>  			const struct rte_eth_dev *eth_dev);
> +
> +struct rte_event_eth_rx_adapter_queue_stats;
> +
> +/**
> + * Retrieve ethernet Rx adapter queue statistics.
> + *
> + * @param dev
> + *   Event device pointer
> + *
> + * @param eth_dev
> + *   Ethernet device pointer
> + *
> + * @param rx_queue_id
> + *  Ethernet device receive queue index.
> + *
> + * @param[out] q_stats
> + *   Pointer to queue stats structure
> + *
> + * @return
> + *   Return 0 on success.
> + */
> +typedef int (*eventdev_eth_rx_adapter_q_stats_get)
> +			(const struct rte_eventdev *dev,
> +			 const struct rte_eth_dev *eth_dev,
> +			 uint16_t rx_queue_id,
> +			 struct rte_event_eth_rx_adapter_queue_stats *q_stats);
> +
> +/**
> + * Reset ethernet Rx adapter queue statistics.
> + *
> + * @param dev
> + *   Event device pointer
> + *
> + * @param eth_dev
> + *   Ethernet device pointer
> + *
> + * @param rx_queue_id
> + *  Ethernet device receive queue index.
> + *
> + * @return
> + *   Return 0 on success.
> + */
> +typedef int (*eventdev_eth_rx_adapter_q_stats_reset)
> +			(const struct rte_eventdev *dev,
> +			 const struct rte_eth_dev *eth_dev,
> +			 uint16_t rx_queue_id);
> +
>  /**
>   * Start eventdev selftest.
>   *
> @@ -1224,6 +1271,11 @@ struct eventdev_ops {
>  	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
>  	/**< Reset crypto stats */
> 
> +	eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
> +	/**< Get ethernet Rx queue stats */
> +	eventdev_eth_rx_adapter_q_stats_reset eth_rx_adapter_queue_stats_reset;
> +	/**< Reset ethernet Rx queue stats */
> +
>  	eventdev_eth_tx_adapter_caps_get_t eth_tx_adapter_caps_get;
>  	/**< Get ethernet Tx adapter capabilities */
> 
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
> index a175c61551..31bbceb6c8 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -245,6 +245,10 @@ struct eth_rx_queue_info {
>  	uint64_t event;
>  	struct eth_rx_vector_data vector_data;
>  	struct eth_event_enqueue_buffer *event_buf;
> +	/* use adapter stats struct for queue level stats,
> +	 * as same stats need to be updated for adapter and queue
> +	 */
> +	struct rte_event_eth_rx_adapter_stats *stats;
>  };
> 
>  static struct event_eth_rx_adapter **event_eth_rx_adapter;
> @@ -268,14 +272,18 @@ rxa_validate_id(uint8_t id)
> 
>  static inline struct eth_event_enqueue_buffer *
>  rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> -		  uint16_t rx_queue_id)
> +		  uint16_t rx_queue_id,
> +		  struct rte_event_eth_rx_adapter_stats **stats)
>  {
>  	if (rx_adapter->use_queue_event_buf) {
>  		struct eth_device_info *dev_info =
>  			&rx_adapter->eth_devices[eth_dev_id];
> +		*stats = dev_info->rx_queue[rx_queue_id].stats;
>  		return dev_info->rx_queue[rx_queue_id].event_buf;
> -	} else
> +	} else {
> +		*stats = &rx_adapter->stats;
>  		return &rx_adapter->event_enqueue_buffer;
> +	}
>  }
> 
>  #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
> @@ -766,9 +774,9 @@ rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
>  /* Enqueue buffered events to event device */
>  static inline uint16_t
>  rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
> -		       struct eth_event_enqueue_buffer *buf)
> +		       struct eth_event_enqueue_buffer *buf,
> +		       struct rte_event_eth_rx_adapter_stats *stats)
>  {
> -	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
>  	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
> 
>  	if (!count)
> @@ -883,7 +891,8 @@ rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
>  static inline void
>  rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
>  		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
> -		 struct eth_event_enqueue_buffer *buf)
> +		 struct eth_event_enqueue_buffer *buf,
> +		 struct rte_event_eth_rx_adapter_stats *stats)
>  {
>  	uint32_t i;
>  	struct eth_device_info *dev_info =
> @@ -954,7 +963,7 @@ rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
>  		else
>  			num = nb_cb;
>  		if (dropped)
> -			rx_adapter->stats.rx_dropped += dropped;
> +			stats->rx_dropped += dropped;
>  	}
> 
>  	buf->count += num;
> @@ -985,11 +994,10 @@ rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
>  static inline uint32_t
>  rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
>  	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
> -	   int *rxq_empty, struct eth_event_enqueue_buffer *buf)
> +	   int *rxq_empty, struct eth_event_enqueue_buffer *buf,
> +	   struct rte_event_eth_rx_adapter_stats *stats)
>  {
>  	struct rte_mbuf *mbufs[BATCH_SIZE];
> -	struct rte_event_eth_rx_adapter_stats *stats =
> -					&rx_adapter->stats;
>  	uint16_t n;
>  	uint32_t nb_rx = 0;
> 
> @@ -1000,7 +1008,7 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
>  	 */
>  	while (rxa_pkt_buf_available(buf)) {
>  		if (buf->count >= BATCH_SIZE)
> -			rxa_flush_event_buffer(rx_adapter, buf);
> +			rxa_flush_event_buffer(rx_adapter, buf, stats);
> 
>  		stats->rx_poll_count++;
>  		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
> @@ -1009,14 +1017,17 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
>  				*rxq_empty = 1;
>  			break;
>  		}
> -		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
> +		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
> +				 stats);
>  		nb_rx += n;
>  		if (rx_count + nb_rx > max_rx)
>  			break;
>  	}
> 
>  	if (buf->count > 0)
> -		rxa_flush_event_buffer(rx_adapter, buf);
> +		rxa_flush_event_buffer(rx_adapter, buf, stats);
> +
> +	stats->rx_packets += nb_rx;
> 
>  	return nb_rx;
>  }
> @@ -1135,28 +1146,30 @@ rxa_intr_thread(void *arg)
>  /* Dequeue <port, q> from interrupt ring and enqueue received
>   * mbufs to eventdev
>   */
> -static inline uint32_t
> +static inline void
>  rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	uint32_t n;
>  	uint32_t nb_rx = 0;
>  	int rxq_empty;
>  	struct eth_event_enqueue_buffer *buf;
> +	struct rte_event_eth_rx_adapter_stats *stats;
>  	rte_spinlock_t *ring_lock;
>  	uint8_t max_done = 0;
> 
>  	if (rx_adapter->num_rx_intr == 0)
> -		return 0;
> +		return;
> 
>  	if (rte_ring_count(rx_adapter->intr_ring) == 0
>  		&& !rx_adapter->qd_valid)
> -		return 0;
> +		return;
> 
>  	buf = &rx_adapter->event_enqueue_buffer;
> +	stats = &rx_adapter->stats;
>  	ring_lock = &rx_adapter->intr_ring_lock;
> 
>  	if (buf->count >= BATCH_SIZE)
> -		rxa_flush_event_buffer(rx_adapter, buf);
> +		rxa_flush_event_buffer(rx_adapter, buf, stats);
> 
>  	while (rxa_pkt_buf_available(buf)) {
>  		struct eth_device_info *dev_info;
> @@ -1208,7 +1221,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>  					continue;
>  				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
>  					rx_adapter->max_nb_rx,
> -					&rxq_empty, buf);
> +					&rxq_empty, buf, stats);
>  				nb_rx += n;
> 
>  				enq_buffer_full = !rxq_empty && n == 0;
> @@ -1229,7 +1242,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>  		} else {
>  			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
>  				rx_adapter->max_nb_rx,
> -				&rxq_empty, buf);
> +				&rxq_empty, buf, stats);
>  			rx_adapter->qd_valid = !rxq_empty;
>  			nb_rx += n;
>  			if (nb_rx > rx_adapter->max_nb_rx)
> @@ -1239,7 +1252,6 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
> 
>  done:
>  	rx_adapter->stats.rx_intr_packets += nb_rx;
> -	return nb_rx;
>  }
> 
>  /*
> @@ -1255,12 +1267,13 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>   * the hypervisor's switching layer where adjustments can be made to deal with
>   * it.
>   */
> -static inline uint32_t
> +static inline void
>  rxa_poll(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	uint32_t num_queue;
>  	uint32_t nb_rx = 0;
>  	struct eth_event_enqueue_buffer *buf = NULL;
> +	struct rte_event_eth_rx_adapter_stats *stats = NULL;
>  	uint32_t wrr_pos;
>  	uint32_t max_nb_rx;
> 
> @@ -1273,24 +1286,24 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
>  		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
>  		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
> 
> -		buf = rxa_event_buf_get(rx_adapter, d, qid);
> +		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
> 
>  		/* Don't do a batch dequeue from the rx queue if there isn't
>  		 * enough space in the enqueue buffer.
>  		 */
>  		if (buf->count >= BATCH_SIZE)
> -			rxa_flush_event_buffer(rx_adapter, buf);
> +			rxa_flush_event_buffer(rx_adapter, buf, stats);
>  		if (!rxa_pkt_buf_available(buf)) {
>  			if (rx_adapter->use_queue_event_buf)
>  				goto poll_next_entry;
>  			else {
>  				rx_adapter->wrr_pos = wrr_pos;
> -				return nb_rx;
> +				return;
>  			}
>  		}
> 
>  		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
> -				NULL, buf);
> +				NULL, buf, stats);
>  		if (nb_rx > max_nb_rx) {
>  			rx_adapter->wrr_pos =
>  				    (wrr_pos + 1) % rx_adapter->wrr_len;
> @@ -1301,7 +1314,6 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
>  		if (++wrr_pos == rx_adapter->wrr_len)
>  			wrr_pos = 0;
>  	}
> -	return nb_rx;
>  }
> 
>  static void
> @@ -1309,12 +1321,13 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
>  {
>  	struct event_eth_rx_adapter *rx_adapter = arg;
>  	struct eth_event_enqueue_buffer *buf = NULL;
> +	struct rte_event_eth_rx_adapter_stats *stats = NULL;
>  	struct rte_event *ev;
> 
> -	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
> +	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
> 
>  	if (buf->count)
> -		rxa_flush_event_buffer(rx_adapter, buf);
> +		rxa_flush_event_buffer(rx_adapter, buf, stats);
> 
>  	if (vec->vector_ev->nb_elem == 0)
>  		return;
> @@ -1333,7 +1346,6 @@ static int
>  rxa_service_func(void *args)
>  {
>  	struct event_eth_rx_adapter *rx_adapter = args;
> -	struct rte_event_eth_rx_adapter_stats *stats;
> 
>  	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
>  		return 0;
> @@ -1360,10 +1372,11 @@ rxa_service_func(void *args)
>  		}
>  	}
> 
> -	stats = &rx_adapter->stats;
> -	stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
> -	stats->rx_packets += rxa_poll(rx_adapter);
> +	rxa_intr_ring_dequeue(rx_adapter);
> +	rxa_poll(rx_adapter);
> +
>  	rte_spinlock_unlock(&rx_adapter->rx_lock);
> +
>  	return 0;
>  }
> 
> @@ -1937,9 +1950,13 @@ rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
>  	if (rx_adapter->use_queue_event_buf) {
>  		struct eth_event_enqueue_buffer *event_buf =
>  			dev_info->rx_queue[rx_queue_id].event_buf;
> +		struct rte_event_eth_rx_adapter_stats *stats =
> +			dev_info->rx_queue[rx_queue_id].stats;
>  		rte_free(event_buf->events);
>  		rte_free(event_buf);
> +		rte_free(stats);
>  		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
> +		dev_info->rx_queue[rx_queue_id].stats = NULL;
>  	}
>  }
> 
> @@ -1955,6 +1972,7 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
>  	int sintrq;
>  	struct rte_event *qi_ev;
>  	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
> +	struct rte_event_eth_rx_adapter_stats *stats = NULL;
>  	uint16_t eth_dev_id = dev_info->dev->data->port_id;
>  	int ret;
> 
> @@ -2061,6 +2079,21 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
> 
>  	queue_info->event_buf = new_rx_buf;
> 
> +	/* Allocate storage for adapter queue stats */
> +	stats = rte_zmalloc_socket("rx_queue_stats",
> +				sizeof(*stats), 0,
> +				rte_eth_dev_socket_id(eth_dev_id));
> +	if (stats == NULL) {
> +		rte_free(new_rx_buf->events);
> +		rte_free(new_rx_buf);
> +		RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
> +				 " dev_id: %d queue_id: %d",
> +				 eth_dev_id, rx_queue_id);
> +		return -ENOMEM;
> +	}
> +
> +	queue_info->stats = stats;
> +
>  	return 0;
>  }
> 
> @@ -2819,6 +2852,15 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
>  	return rxa_ctrl(id, 0);
>  }
> 
> +static inline void
> +rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
> +{
> +	struct rte_event_eth_rx_adapter_stats *q_stats;
> +
> +	q_stats = queue_info->stats;
> +	memset(q_stats, 0, sizeof(*q_stats));
> +}
> +
>  int
>  rte_event_eth_rx_adapter_stats_get(uint8_t id,
>  			       struct rte_event_eth_rx_adapter_stats *stats)
> @@ -2829,7 +2871,9 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
>  	struct rte_event_eth_rx_adapter_stats dev_stats;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
> -	uint32_t i;
> +	struct eth_rx_queue_info *queue_info;
> +	struct rte_event_eth_rx_adapter_stats *q_stats;
> +	uint32_t i, j;
>  	int ret;
> 
>  	if (rxa_memzone_lookup())
> @@ -2843,8 +2887,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
> 
>  	dev = &rte_eventdevs[rx_adapter->eventdev_id];
>  	memset(stats, 0, sizeof(*stats));
> +
> +	if (rx_adapter->service_inited)
> +		*stats = rx_adapter->stats;
> +
>  	RTE_ETH_FOREACH_DEV(i) {
>  		dev_info = &rx_adapter->eth_devices[i];
> +
> +		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {

nitpick: extra space between use_queue_event_buf and &&.

> +
> +			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
> +						j++) {

nitpick: align this line to "j = 0"
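
i.e. something like:

	for (j = 0; j < dev_info->dev->data->nb_rx_queues;
	     j++) {

so that the continuation sits under "j = 0".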

Rest of the patch set looks good to me.

With these changes, you can add my ack.

> +				queue_info = &dev_info->rx_queue[j];
> +				if (!queue_info->queue_enabled)
> +					continue;
> +				q_stats = queue_info->stats;
> +
> +				stats->rx_packets += q_stats->rx_packets;
> +				stats->rx_poll_count += q_stats->rx_poll_count;
> +				stats->rx_enq_count += q_stats->rx_enq_count;
> +				stats->rx_enq_retry += q_stats->rx_enq_retry;
> +				stats->rx_dropped += q_stats->rx_dropped;
> +				stats->rx_enq_block_cycles +=
> +						q_stats->rx_enq_block_cycles;
> +			}
> +		}
> +
>  		if (dev_info->internal_event_port == 0 ||
>  			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
>  			continue;
> @@ -2857,19 +2925,69 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
>  		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
>  	}
> 
> -	if (rx_adapter->service_inited)
> -		*stats = rx_adapter->stats;
> -
> +	buf = &rx_adapter->event_enqueue_buffer;
>  	stats->rx_packets += dev_stats_sum.rx_packets;
>  	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
> +	stats->rx_event_buf_count = buf->count;
> +	stats->rx_event_buf_size = buf->events_size;
> 
> -	if (!rx_adapter->use_queue_event_buf) {
> -		buf = &rx_adapter->event_enqueue_buffer;
> -		stats->rx_event_buf_count = buf->count;
> -		stats->rx_event_buf_size = buf->events_size;
> -	} else {
> -		stats->rx_event_buf_count = 0;
> -		stats->rx_event_buf_size = 0;
> +	return 0;
> +}
> +
> +int
> +rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
> +		uint16_t eth_dev_id,
> +		uint16_t rx_queue_id,
> +		struct rte_event_eth_rx_adapter_queue_stats *stats)
> +{
> +	struct event_eth_rx_adapter *rx_adapter;
> +	struct eth_device_info *dev_info;
> +	struct eth_rx_queue_info *queue_info;
> +	struct eth_event_enqueue_buffer *event_buf;
> +	struct rte_event_eth_rx_adapter_stats *q_stats;
> +	struct rte_eventdev *dev;
> +
> +	if (rxa_memzone_lookup())
> +		return -ENOMEM;
> +
> +	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> +	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> +
> +	rx_adapter = rxa_id_to_adapter(id);
> +
> +	if (rx_adapter == NULL || stats == NULL)
> +		return -EINVAL;
> +
> +	if (!rx_adapter->use_queue_event_buf)
> +		return -EINVAL;
> +
> +	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
> +		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
> +		return -EINVAL;
> +	}
> +
> +	dev_info = &rx_adapter->eth_devices[eth_dev_id];
> +	if (dev_info->rx_queue == NULL ||
> +	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
> +		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
> +		return -EINVAL;
> +	}
> +
> +	queue_info = &dev_info->rx_queue[rx_queue_id];
> +	event_buf = queue_info->event_buf;
> +	q_stats = queue_info->stats;
> +
> +	stats->rx_event_buf_count = event_buf->count;
> +	stats->rx_event_buf_size = event_buf->events_size;
> +	stats->rx_packets = q_stats->rx_packets;
> +	stats->rx_poll_count = q_stats->rx_poll_count;
> +	stats->rx_dropped = q_stats->rx_dropped;
> +
> +	dev = &rte_eventdevs[rx_adapter->eventdev_id];
> +	if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
> +		return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
> +						&rte_eth_devices[eth_dev_id],
> +						rx_queue_id, stats);
>  	}
> 
>  	return 0;
> @@ -2881,7 +2999,8 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>  	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
> -	uint32_t i;
> +	struct eth_rx_queue_info *queue_info;
> +	uint32_t i, j;
> 
>  	if (rxa_memzone_lookup())
>  		return -ENOMEM;
> @@ -2893,8 +3012,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>  		return -EINVAL;
> 
>  	dev = &rte_eventdevs[rx_adapter->eventdev_id];
> +
>  	RTE_ETH_FOREACH_DEV(i) {
>  		dev_info = &rx_adapter->eth_devices[i];
> +
> +		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
> +
> +			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
> +						j++) {
> +				queue_info = &dev_info->rx_queue[j];
> +				if (!queue_info->queue_enabled)
> +					continue;
> +				rxa_queue_stats_reset(queue_info);
> +			}
> +		}
> +
>  		if (dev_info->internal_event_port == 0 ||
>  			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
>  			continue;
> @@ -2903,6 +3035,56 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>  	}
> 
>  	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
> +
> +	return 0;
> +}
> +
> +int
> +rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
> +		uint16_t eth_dev_id,
> +		uint16_t rx_queue_id)
> +{
> +	struct event_eth_rx_adapter *rx_adapter;
> +	struct eth_device_info *dev_info;
> +	struct eth_rx_queue_info *queue_info;
> +	struct rte_eventdev *dev;
> +
> +	if (rxa_memzone_lookup())
> +		return -ENOMEM;
> +
> +	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> +	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> +
> +	rx_adapter = rxa_id_to_adapter(id);
> +	if (rx_adapter == NULL)
> +		return -EINVAL;
> +
> +	if (!rx_adapter->use_queue_event_buf)
> +		return -EINVAL;
> +
> +	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
> +		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
> +		return -EINVAL;
> +	}
> +
> +	dev_info = &rx_adapter->eth_devices[eth_dev_id];
> +
> +	if (dev_info->rx_queue == NULL ||
> +	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
> +		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
> +		return -EINVAL;
> +	}
> +
> +	queue_info = &dev_info->rx_queue[rx_queue_id];
> +	rxa_queue_stats_reset(queue_info);
> +
> +	dev = &rte_eventdevs[rx_adapter->eventdev_id];
> +	if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
> +		return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
> +						&rte_eth_devices[eth_dev_id],
> +						rx_queue_id);
> +	}
> +
>  	return 0;
>  }
> 
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
> index ab625f7273..9546d792e9 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> @@ -35,6 +35,8 @@
>   *  - rte_event_eth_rx_adapter_stats_get()
>   *  - rte_event_eth_rx_adapter_stats_reset()
>   *  - rte_event_eth_rx_adapter_queue_conf_get()
> + *  - rte_event_eth_rx_adapter_queue_stats_get()
> + *  - rte_event_eth_rx_adapter_queue_stats_reset()
>   *
>   * The application creates an ethernet to event adapter using
>   * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
> @@ -204,6 +206,23 @@ struct rte_event_eth_rx_adapter_queue_conf {
>  	/**< event buffer size for this queue */
>  };
> 
> +/**
> + * A structure used to retrieve statistics for an
> + * eth rx adapter queue.
> + */
> +struct rte_event_eth_rx_adapter_queue_stats {
> +	uint64_t rx_event_buf_count;
> +	/**< Rx event buffered count */
> +	uint64_t rx_event_buf_size;
> +	/**< Rx event buffer size */
> +	uint64_t rx_poll_count;
> +	/**< Receive queue poll count */
> +	uint64_t rx_packets;
> +	/**< Received packet count */
> +	uint64_t rx_dropped;
> +	/**< Received packet dropped count */
> +};
> +
>  /**
>   * A structure used to retrieve statistics for an eth rx adapter instance.
>   */
> @@ -617,6 +636,53 @@ int rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
>  			uint16_t rx_queue_id,
>  			struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
> 
> +/**
> + * Retrieve Rx queue statistics.
> + *
> + * @param id
> + *  Adapter identifier.
> + *
> + * @param eth_dev_id
> + *  Port identifier of Ethernet device.
> + *
> + * @param rx_queue_id
> + *  Ethernet device receive queue index.
> + *
> + * @param[out] stats
> + *  Pointer to struct rte_event_eth_rx_adapter_queue_stats
> + *
> + * @return
> + *  - 0: Success, queue buffer stats retrieved.
> + *  - <0: Error code on failure.
> + */
> +__rte_experimental
> +int
> +rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
> +		uint16_t eth_dev_id,
> +		uint16_t rx_queue_id,
> +		struct rte_event_eth_rx_adapter_queue_stats *stats);
> +
> +/**
> + * Reset Rx queue statistics.
> + *
> + * @param id
> + *  Adapter identifier.
> + *
> + * @param eth_dev_id
> + *  Port identifier of Ethernet device.
> + *
> + * @param rx_queue_id
> + *  Ethernet device receive queue index.
> + *
> + * @return
> + *  - 0: Success, queue stats reset.
> + *  - <0: Error code on failure.
> + */
> +__rte_experimental
> +int
> +rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
> +		uint16_t eth_dev_id,
> +		uint16_t rx_queue_id);
> 
>  #ifdef __cplusplus
>  }
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> index cd37164141..ade1f1182e 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -103,6 +103,8 @@ EXPERIMENTAL {
>  	# added in 21.11
>  	rte_event_eth_rx_adapter_create_with_params;
>  	rte_event_eth_rx_adapter_queue_conf_get;
> +	rte_event_eth_rx_adapter_queue_stats_get;
> +	rte_event_eth_rx_adapter_queue_stats_reset;
>  };
> 
>  INTERNAL {
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [dpdk-dev] [PATCH v3 1/3] eventdev/eth_rx: add queue stats get and reset APIs
  2021-10-28  7:06 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
                     ` (2 preceding siblings ...)
  2021-10-28  8:10   ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Jayatheerthan, Jay
@ 2021-10-28 10:27   ` Naga Harish K S V
  2021-10-28 10:27     ` [dpdk-dev] [PATCH v3 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
                       ` (2 more replies)
  2021-10-28 10:37   ` Naga Harish K S V
  4 siblings, 3 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28 10:27 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

This patch adds a new API, ``rte_event_eth_rx_adapter_queue_stats_get``, to
retrieve queue stats. The queue stats are returned in
``struct rte_event_eth_rx_adapter_queue_stats`` format.

For resetting the queue stats, the
``rte_event_eth_rx_adapter_queue_stats_reset`` API is added.

The adapter stats_get and stats_reset APIs are also updated to
handle the queue level event buffer use case.
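
A minimal usage sketch (illustrative only, not part of this patch) is shown
below. The helper name and the adapter/port/queue ids are hypothetical, and
the queue is assumed to have been added with a queue level event buffer,
since both APIs return -EINVAL otherwise:

#include <stdio.h>
#include <inttypes.h>
#include <rte_event_eth_rx_adapter.h>

/* Dump the per-queue counters of one Rx queue and then clear them. */
static int
dump_and_clear_queue_stats(uint8_t rxa_id, uint16_t eth_dev_id,
			   uint16_t rx_queue_id)
{
	struct rte_event_eth_rx_adapter_queue_stats q_stats;
	int ret;

	ret = rte_event_eth_rx_adapter_queue_stats_get(rxa_id, eth_dev_id,
						       rx_queue_id, &q_stats);
	if (ret < 0)
		return ret;

	printf("queue %u: packets %" PRIu64 ", dropped %" PRIu64
	       ", polls %" PRIu64 ", buffered %" PRIu64 "/%" PRIu64 "\n",
	       rx_queue_id, q_stats.rx_packets, q_stats.rx_dropped,
	       q_stats.rx_poll_count, q_stats.rx_event_buf_count,
	       q_stats.rx_event_buf_size);

	return rte_event_eth_rx_adapter_queue_stats_reset(rxa_id, eth_dev_id,
							   rx_queue_id);
}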

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
v2:
* added pmd callback support for adapter queue_stats_get and
  queue_stats_reset apis.

v3:
* addressed coding style review comments
---
 .../prog_guide/event_ethernet_rx_adapter.rst  |  11 +
 lib/eventdev/eventdev_pmd.h                   |  52 ++++
 lib/eventdev/rte_event_eth_rx_adapter.c       | 268 +++++++++++++++---
 lib/eventdev/rte_event_eth_rx_adapter.h       |  66 +++++
 lib/eventdev/version.map                      |   2 +
 5 files changed, 356 insertions(+), 43 deletions(-)

diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 8b58130fc5..67b11e1563 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -166,6 +166,17 @@ flags for handling received packets, event queue identifier, scheduler type,
 event priority, polling frequency of the receive queue and flow identifier
 in struct ``rte_event_eth_rx_adapter_queue_conf``.
 
+Getting and resetting Adapter queue stats
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_eth_rx_adapter_queue_stats_get()`` function reports
+adapter queue counters defined in struct ``rte_event_eth_rx_adapter_queue_stats``.
+This function reports queue level stats only when queue level event buffer is
+used; otherwise, it returns -EINVAL.
+
+The ``rte_event_eth_rx_adapter_queue_stats_reset`` function can be used to
+reset queue level stats when queue level event buffer is in use.
+
 Interrupt Based Rx Queues
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index d009e24309..3ba49d1fd4 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -749,6 +749,53 @@ typedef int (*eventdev_eth_rx_adapter_stats_get)
 typedef int (*eventdev_eth_rx_adapter_stats_reset)
 			(const struct rte_eventdev *dev,
 			const struct rte_eth_dev *eth_dev);
+
+struct rte_event_eth_rx_adapter_queue_stats;
+
+/**
+ * Retrieve ethernet Rx adapter queue statistics.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param eth_dev
+ *   Ethernet device pointer
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @param[out] q_stats
+ *   Pointer to queue stats structure
+ *
+ * @return
+ *   Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_q_stats_get)
+			(const struct rte_eventdev *dev,
+			 const struct rte_eth_dev *eth_dev,
+			 uint16_t rx_queue_id,
+			 struct rte_event_eth_rx_adapter_queue_stats *q_stats);
+
+/**
+ * Reset ethernet Rx adapter queue statistics.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param eth_dev
+ *   Ethernet device pointer
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @return
+ *   Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_q_stats_reset)
+			(const struct rte_eventdev *dev,
+			 const struct rte_eth_dev *eth_dev,
+			 uint16_t rx_queue_id);
+
 /**
  * Start eventdev selftest.
  *
@@ -1224,6 +1271,11 @@ struct eventdev_ops {
 	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
 	/**< Reset crypto stats */
 
+	eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
+	/**< Get ethernet Rx queue stats */
+	eventdev_eth_rx_adapter_q_stats_reset eth_rx_adapter_queue_stats_reset;
+	/**< Reset ethernet Rx queue stats */
+
 	eventdev_eth_tx_adapter_caps_get_t eth_tx_adapter_caps_get;
 	/**< Get ethernet Tx adapter capabilities */
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index a175c61551..3adec52eac 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -245,6 +245,10 @@ struct eth_rx_queue_info {
 	uint64_t event;
 	struct eth_rx_vector_data vector_data;
 	struct eth_event_enqueue_buffer *event_buf;
+	/* use adapter stats struct for queue level stats,
+	 * as same stats need to be updated for adapter and queue
+	 */
+	struct rte_event_eth_rx_adapter_stats *stats;
 };
 
 static struct event_eth_rx_adapter **event_eth_rx_adapter;
@@ -268,14 +272,18 @@ rxa_validate_id(uint8_t id)
 
 static inline struct eth_event_enqueue_buffer *
 rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
-		  uint16_t rx_queue_id)
+		  uint16_t rx_queue_id,
+		  struct rte_event_eth_rx_adapter_stats **stats)
 {
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_device_info *dev_info =
 			&rx_adapter->eth_devices[eth_dev_id];
+		*stats = dev_info->rx_queue[rx_queue_id].stats;
 		return dev_info->rx_queue[rx_queue_id].event_buf;
-	} else
+	} else {
+		*stats = &rx_adapter->stats;
 		return &rx_adapter->event_enqueue_buffer;
+	}
 }
 
 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -766,9 +774,9 @@ rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
 /* Enqueue buffered events to event device */
 static inline uint16_t
 rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
-		       struct eth_event_enqueue_buffer *buf)
+		       struct eth_event_enqueue_buffer *buf,
+		       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
 
 	if (!count)
@@ -883,7 +891,8 @@ rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 static inline void
 rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
-		 struct eth_event_enqueue_buffer *buf)
+		 struct eth_event_enqueue_buffer *buf,
+		 struct rte_event_eth_rx_adapter_stats *stats)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
@@ -954,7 +963,7 @@ rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		else
 			num = nb_cb;
 		if (dropped)
-			rx_adapter->stats.rx_dropped += dropped;
+			stats->rx_dropped += dropped;
 	}
 
 	buf->count += num;
@@ -985,11 +994,10 @@ rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
 static inline uint32_t
 rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
-	   int *rxq_empty, struct eth_event_enqueue_buffer *buf)
+	   int *rxq_empty, struct eth_event_enqueue_buffer *buf,
+	   struct rte_event_eth_rx_adapter_stats *stats)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_event_eth_rx_adapter_stats *stats =
-					&rx_adapter->stats;
 	uint16_t n;
 	uint32_t nb_rx = 0;
 
@@ -1000,7 +1008,7 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	 */
 	while (rxa_pkt_buf_available(buf)) {
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 		stats->rx_poll_count++;
 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
@@ -1009,14 +1017,17 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 				*rxq_empty = 1;
 			break;
 		}
-		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
+		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
+				 stats);
 		nb_rx += n;
 		if (rx_count + nb_rx > max_rx)
 			break;
 	}
 
 	if (buf->count > 0)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
+
+	stats->rx_packets += nb_rx;
 
 	return nb_rx;
 }
@@ -1135,28 +1146,30 @@ rxa_intr_thread(void *arg)
 /* Dequeue <port, q> from interrupt ring and enqueue received
  * mbufs to eventdev
  */
-static inline uint32_t
+static inline void
 rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
 	struct eth_event_enqueue_buffer *buf;
+	struct rte_event_eth_rx_adapter_stats *stats;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
 	if (rx_adapter->num_rx_intr == 0)
-		return 0;
+		return;
 
 	if (rte_ring_count(rx_adapter->intr_ring) == 0
 		&& !rx_adapter->qd_valid)
-		return 0;
+		return;
 
 	buf = &rx_adapter->event_enqueue_buffer;
+	stats = &rx_adapter->stats;
 	ring_lock = &rx_adapter->intr_ring_lock;
 
 	if (buf->count >= BATCH_SIZE)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	while (rxa_pkt_buf_available(buf)) {
 		struct eth_device_info *dev_info;
@@ -1208,7 +1221,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 					continue;
 				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
 					rx_adapter->max_nb_rx,
-					&rxq_empty, buf);
+					&rxq_empty, buf, stats);
 				nb_rx += n;
 
 				enq_buffer_full = !rxq_empty && n == 0;
@@ -1229,7 +1242,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 		} else {
 			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
 				rx_adapter->max_nb_rx,
-				&rxq_empty, buf);
+				&rxq_empty, buf, stats);
 			rx_adapter->qd_valid = !rxq_empty;
 			nb_rx += n;
 			if (nb_rx > rx_adapter->max_nb_rx)
@@ -1239,7 +1252,6 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 
 done:
 	rx_adapter->stats.rx_intr_packets += nb_rx;
-	return nb_rx;
 }
 
 /*
@@ -1255,12 +1267,13 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
  * the hypervisor's switching layer where adjustments can be made to deal with
  * it.
  */
-static inline uint32_t
+static inline void
 rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1273,24 +1286,24 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
 		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
 
-		buf = rxa_event_buf_get(rx_adapter, d, qid);
+		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
 
 		/* Don't do a batch dequeue from the rx queue if there isn't
 		 * enough space in the enqueue buffer.
 		 */
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 		if (!rxa_pkt_buf_available(buf)) {
 			if (rx_adapter->use_queue_event_buf)
 				goto poll_next_entry;
 			else {
 				rx_adapter->wrr_pos = wrr_pos;
-				return nb_rx;
+				return;
 			}
 		}
 
 		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
-				NULL, buf);
+				NULL, buf, stats);
 		if (nb_rx > max_nb_rx) {
 			rx_adapter->wrr_pos =
 				    (wrr_pos + 1) % rx_adapter->wrr_len;
@@ -1301,7 +1314,6 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		if (++wrr_pos == rx_adapter->wrr_len)
 			wrr_pos = 0;
 	}
-	return nb_rx;
 }
 
 static void
@@ -1309,12 +1321,13 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
 	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	struct rte_event *ev;
 
-	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
+	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
 
 	if (buf->count)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	if (vec->vector_ev->nb_elem == 0)
 		return;
@@ -1333,7 +1346,6 @@ static int
 rxa_service_func(void *args)
 {
 	struct event_eth_rx_adapter *rx_adapter = args;
-	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
 		return 0;
@@ -1360,10 +1372,11 @@ rxa_service_func(void *args)
 		}
 	}
 
-	stats = &rx_adapter->stats;
-	stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
-	stats->rx_packets += rxa_poll(rx_adapter);
+	rxa_intr_ring_dequeue(rx_adapter);
+	rxa_poll(rx_adapter);
+
 	rte_spinlock_unlock(&rx_adapter->rx_lock);
+
 	return 0;
 }
 
@@ -1937,9 +1950,13 @@ rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_event_enqueue_buffer *event_buf =
 			dev_info->rx_queue[rx_queue_id].event_buf;
+		struct rte_event_eth_rx_adapter_stats *stats =
+			dev_info->rx_queue[rx_queue_id].stats;
 		rte_free(event_buf->events);
 		rte_free(event_buf);
+		rte_free(stats);
 		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
+		dev_info->rx_queue[rx_queue_id].stats = NULL;
 	}
 }
 
@@ -1955,6 +1972,7 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 	int sintrq;
 	struct rte_event *qi_ev;
 	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
 	int ret;
 
@@ -2061,6 +2079,21 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 
 	queue_info->event_buf = new_rx_buf;
 
+	/* Allocate storage for adapter queue stats */
+	stats = rte_zmalloc_socket("rx_queue_stats",
+				sizeof(*stats), 0,
+				rte_eth_dev_socket_id(eth_dev_id));
+	if (stats == NULL) {
+		rte_free(new_rx_buf->events);
+		rte_free(new_rx_buf);
+		RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
+				 " dev_id: %d queue_id: %d",
+				 eth_dev_id, rx_queue_id);
+		return -ENOMEM;
+	}
+
+	queue_info->stats = stats;
+
 	return 0;
 }
 
@@ -2819,6 +2852,15 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
 	return rxa_ctrl(id, 0);
 }
 
+static inline void
+rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
+{
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+
+	q_stats = queue_info->stats;
+	memset(q_stats, 0, sizeof(*q_stats));
+}
+
 int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
@@ -2829,7 +2871,9 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+	uint32_t i, j;
 	int ret;
 
 	if (rxa_memzone_lookup())
@@ -2843,8 +2887,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	memset(stats, 0, sizeof(*stats));
+
+	if (rx_adapter->service_inited)
+		*stats = rx_adapter->stats;
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+			     j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				q_stats = queue_info->stats;
+
+				stats->rx_packets += q_stats->rx_packets;
+				stats->rx_poll_count += q_stats->rx_poll_count;
+				stats->rx_enq_count += q_stats->rx_enq_count;
+				stats->rx_enq_retry += q_stats->rx_enq_retry;
+				stats->rx_dropped += q_stats->rx_dropped;
+				stats->rx_enq_block_cycles +=
+						q_stats->rx_enq_block_cycles;
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
 			continue;
@@ -2857,19 +2925,69 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
 	}
 
-	if (rx_adapter->service_inited)
-		*stats = rx_adapter->stats;
-
+	buf = &rx_adapter->event_enqueue_buffer;
 	stats->rx_packets += dev_stats_sum.rx_packets;
 	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+	stats->rx_event_buf_count = buf->count;
+	stats->rx_event_buf_size = buf->events_size;
 
-	if (!rx_adapter->use_queue_event_buf) {
-		buf = &rx_adapter->event_enqueue_buffer;
-		stats->rx_event_buf_count = buf->count;
-		stats->rx_event_buf_size = buf->events_size;
-	} else {
-		stats->rx_event_buf_count = 0;
-		stats->rx_event_buf_size = 0;
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+	struct eth_event_enqueue_buffer *event_buf;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+	struct rte_eventdev *dev;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+
+	if (rx_adapter == NULL || stats == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
+	}
+
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	event_buf = queue_info->event_buf;
+	q_stats = queue_info->stats;
+
+	stats->rx_event_buf_count = event_buf->count;
+	stats->rx_event_buf_size = event_buf->events_size;
+	stats->rx_packets = q_stats->rx_packets;
+	stats->rx_poll_count = q_stats->rx_poll_count;
+	stats->rx_dropped = q_stats->rx_dropped;
+
+	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+	if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
+		return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
+						&rte_eth_devices[eth_dev_id],
+						rx_queue_id, stats);
 	}
 
 	return 0;
@@ -2881,7 +2999,8 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	uint32_t i, j;
 
 	if (rxa_memzone_lookup())
 		return -ENOMEM;
@@ -2893,8 +3012,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 		return -EINVAL;
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+						j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				rxa_queue_stats_reset(queue_info);
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
 			continue;
@@ -2903,6 +3035,56 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	}
 
 	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+	struct rte_eventdev *dev;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+	if (rx_adapter == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
+	}
+
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	rxa_queue_stats_reset(queue_info);
+
+	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+	if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
+		return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
+						&rte_eth_devices[eth_dev_id],
+						rx_queue_id);
+	}
+
 	return 0;
 }
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index ab625f7273..9546d792e9 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -35,6 +35,8 @@
  *  - rte_event_eth_rx_adapter_stats_get()
  *  - rte_event_eth_rx_adapter_stats_reset()
  *  - rte_event_eth_rx_adapter_queue_conf_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_reset()
  *
  * The application creates an ethernet to event adapter using
  * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
@@ -204,6 +206,23 @@ struct rte_event_eth_rx_adapter_queue_conf {
 	/**< event buffer size for this queue */
 };
 
+/**
+ * A structure used to retrieve statistics for an
+ * eth rx adapter queue.
+ */
+struct rte_event_eth_rx_adapter_queue_stats {
+	uint64_t rx_event_buf_count;
+	/**< Rx event buffered count */
+	uint64_t rx_event_buf_size;
+	/**< Rx event buffer size */
+	uint64_t rx_poll_count;
+	/**< Receive queue poll count */
+	uint64_t rx_packets;
+	/**< Received packet count */
+	uint64_t rx_dropped;
+	/**< Received packet dropped count */
+};
+
 /**
  * A structure used to retrieve statistics for an eth rx adapter instance.
  */
@@ -617,6 +636,53 @@ int rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
 			uint16_t rx_queue_id,
 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
 
+/**
+ * Retrieve Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @param[out] stats
+ *  Pointer to struct rte_event_eth_rx_adapter_queue_stats
+ *
+ * @return
+ *  - 0: Success, queue buffer stats retrieved.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats);
+
+/**
+ * Reset Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @return
+ *  - 0: Success, queue stats reset.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id);
 
 #ifdef __cplusplus
 }
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index cd37164141..ade1f1182e 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -103,6 +103,8 @@ EXPERIMENTAL {
 	# added in 21.11
 	rte_event_eth_rx_adapter_create_with_params;
 	rte_event_eth_rx_adapter_queue_conf_get;
+	rte_event_eth_rx_adapter_queue_stats_get;
+	rte_event_eth_rx_adapter_queue_stats_reset;
 };
 
 INTERNAL {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [dpdk-dev] [PATCH v3 2/3] eventdev/eth_rx: support telemetry
  2021-10-28 10:27   ` [dpdk-dev] [PATCH v3 " Naga Harish K S V
@ 2021-10-28 10:27     ` Naga Harish K S V
  2021-10-28 10:27     ` [dpdk-dev] [PATCH v3 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
  2021-11-02 11:00     ` [dpdk-dev] [PATCH v3 1/3] eventdev/eth_rx: add queue stats get and reset APIs Jerin Jacob
  2 siblings, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28 10:27 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Added telemetry support for the rxa_queue_stats and
rxa_queue_stats_reset commands to get and reset Rx queue
stats, respectively.
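
For illustration only (not part of this patch), the new commands could be
exercised from usertools/dpdk-telemetry.py; both take the parameter list
rxa_id, dev_id, queue_id. The ids and counter values below are hypothetical,
and the exact reply formatting depends on the telemetry client:

--> /eventdev/rxa_queue_stats,0,0,0
{"/eventdev/rxa_queue_stats": {"rx_adapter_id": 0, "eth_dev_id": 0,
  "rx_queue_id": 0, "rx_event_buf_count": 0, "rx_event_buf_size": 1024,
  "rx_poll_count": 12, "rx_packets": 379, "rx_dropped": 0}}
--> /eventdev/rxa_queue_stats_reset,0,0,0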

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 lib/eventdev/rte_event_eth_rx_adapter.c | 124 ++++++++++++++++++++++++
 1 file changed, 124 insertions(+)

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 3adec52eac..4e0829c492 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -3345,6 +3345,122 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
 	return 0;
 }
 
+static int
+handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+			   const char *params,
+			   struct rte_tel_data *d)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, ignoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
+						    rx_queue_id, &q_stats)) {
+		RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
+		return -1;
+	}
+
+	rte_tel_data_start_dict(d);
+	rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+	rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
+	rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
+	RXA_ADD_DICT(q_stats, rx_event_buf_count);
+	RXA_ADD_DICT(q_stats, rx_event_buf_size);
+	RXA_ADD_DICT(q_stats, rx_poll_count);
+	RXA_ADD_DICT(q_stats, rx_packets);
+	RXA_ADD_DICT(q_stats, rx_dropped);
+
+	return 0;
+}
+
+static int
+handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+			     const char *params,
+			     struct rte_tel_data *d __rte_unused)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, ignoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
+						       eth_dev_id,
+						       rx_queue_id)) {
+		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
+		return -1;
+	}
+
+	return 0;
+}
+
 RTE_INIT(rxa_init_telemetry)
 {
 	rte_telemetry_register_cmd("/eventdev/rxa_stats",
@@ -3358,4 +3474,12 @@ RTE_INIT(rxa_init_telemetry)
 	rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
 		handle_rxa_get_queue_conf,
 		"Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
+		handle_rxa_get_queue_stats,
+		"Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
+		handle_rxa_queue_stats_reset,
+		"Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [dpdk-dev] [PATCH v3 3/3] test/event: add unit test for Rx adapter
  2021-10-28 10:27   ` [dpdk-dev] [PATCH v3 " Naga Harish K S V
  2021-10-28 10:27     ` [dpdk-dev] [PATCH v3 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
@ 2021-10-28 10:27     ` Naga Harish K S V
  2021-11-02 11:00     ` [dpdk-dev] [PATCH v3 1/3] eventdev/eth_rx: add queue stats get and reset APIs Jerin Jacob
  2 siblings, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28 10:27 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Add unit tests for the rte_event_eth_rx_adapter_queue_stats_get() and
rte_event_eth_rx_adapter_queue_stats_reset() APIs.

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 app/test/test_event_eth_rx_adapter.c | 60 ++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index 1419f6f64d..7cb91b152f 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -471,6 +471,64 @@ adapter_queue_event_buf_test(void)
 	return TEST_SUCCESS;
 }
 
+static int
+adapter_queue_stats_test(void)
+{
+	int err;
+	struct rte_event ev;
+	uint32_t cap;
+	struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+					 &cap);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	ev.queue_id = 0;
+	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+	ev.priority = 0;
+
+	queue_config.rx_queue_flags = 0;
+	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+		ev.flow_id = 1;
+		queue_config.rx_queue_flags =
+			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+	}
+	queue_config.ev = ev;
+	queue_config.servicing_weight = 1;
+	queue_config.event_buf_size = 1024;
+
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+					TEST_ETHDEV_ID, 0,
+					&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	return TEST_SUCCESS;
+}
+
 static void
 adapter_free(void)
 {
@@ -940,6 +998,8 @@ static struct unit_test_suite event_eth_rx_tests = {
 		TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf),
 		TEST_CASE_ST(adapter_create_with_params, adapter_free,
 			     adapter_queue_event_buf_test),
+		TEST_CASE_ST(adapter_create_with_params, adapter_free,
+			     adapter_queue_stats_test),
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [dpdk-dev] [PATCH v3 1/3] eventdev/eth_rx: add queue stats get and reset APIs
  2021-10-28  7:06 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
                     ` (3 preceding siblings ...)
  2021-10-28 10:27   ` [dpdk-dev] [PATCH v3 " Naga Harish K S V
@ 2021-10-28 10:37   ` Naga Harish K S V
  2021-10-28 10:37     ` [dpdk-dev] [PATCH v3 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
  2021-10-28 10:37     ` [dpdk-dev] [PATCH v3 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
  4 siblings, 2 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28 10:37 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

This patch adds a new API, ``rte_event_eth_rx_adapter_queue_stats_get``, to
retrieve queue stats. The queue stats are returned in
``struct rte_event_eth_rx_adapter_queue_stats`` format.

For resetting the queue stats, the
``rte_event_eth_rx_adapter_queue_stats_reset`` API is added.

The adapter stats_get and stats_reset APIs are also updated to
handle the queue level event buffer use case.

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
v2:
* added pmd callback support for adapter queue_stats_get and
  queue_stats_reset apis.

v3:
* addressed coding style review comments
---
 .../prog_guide/event_ethernet_rx_adapter.rst  |  11 +
 lib/eventdev/eventdev_pmd.h                   |  52 ++++
 lib/eventdev/rte_event_eth_rx_adapter.c       | 268 +++++++++++++++---
 lib/eventdev/rte_event_eth_rx_adapter.h       |  66 +++++
 lib/eventdev/version.map                      |   2 +
 5 files changed, 356 insertions(+), 43 deletions(-)

diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 8b58130fc5..67b11e1563 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -166,6 +166,17 @@ flags for handling received packets, event queue identifier, scheduler type,
 event priority, polling frequency of the receive queue and flow identifier
 in struct ``rte_event_eth_rx_adapter_queue_conf``.
 
+Getting and resetting Adapter queue stats
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_eth_rx_adapter_queue_stats_get()`` function reports
+adapter queue counters defined in struct ``rte_event_eth_rx_adapter_queue_stats``.
+This function reports queue level stats only when queue level event buffer is
+used; otherwise, it returns -EINVAL.
+
+The ``rte_event_eth_rx_adapter_queue_stats_reset`` function can be used to
+reset queue level stats when queue level event buffer is in use.
+
 Interrupt Based Rx Queues
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index d009e24309..3ba49d1fd4 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -749,6 +749,53 @@ typedef int (*eventdev_eth_rx_adapter_stats_get)
 typedef int (*eventdev_eth_rx_adapter_stats_reset)
 			(const struct rte_eventdev *dev,
 			const struct rte_eth_dev *eth_dev);
+
+struct rte_event_eth_rx_adapter_queue_stats;
+
+/**
+ * Retrieve ethernet Rx adapter queue statistics.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param eth_dev
+ *   Ethernet device pointer
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @param[out] q_stats
+ *   Pointer to queue stats structure
+ *
+ * @return
+ *   Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_q_stats_get)
+			(const struct rte_eventdev *dev,
+			 const struct rte_eth_dev *eth_dev,
+			 uint16_t rx_queue_id,
+			 struct rte_event_eth_rx_adapter_queue_stats *q_stats);
+
+/**
+ * Reset ethernet Rx adapter queue statistics.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param eth_dev
+ *   Ethernet device pointer
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @return
+ *   Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_q_stats_reset)
+			(const struct rte_eventdev *dev,
+			 const struct rte_eth_dev *eth_dev,
+			 uint16_t rx_queue_id);
+
 /**
  * Start eventdev selftest.
  *
@@ -1224,6 +1271,11 @@ struct eventdev_ops {
 	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
 	/**< Reset crypto stats */
 
+	eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
+	/**< Get ethernet Rx queue stats */
+	eventdev_eth_rx_adapter_q_stats_reset eth_rx_adapter_queue_stats_reset;
+	/**< Reset ethernet Rx queue stats */
+
 	eventdev_eth_tx_adapter_caps_get_t eth_tx_adapter_caps_get;
 	/**< Get ethernet Tx adapter capabilities */
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index a175c61551..3adec52eac 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -245,6 +245,10 @@ struct eth_rx_queue_info {
 	uint64_t event;
 	struct eth_rx_vector_data vector_data;
 	struct eth_event_enqueue_buffer *event_buf;
+	/* use adapter stats struct for queue level stats,
+	 * as same stats need to be updated for adapter and queue
+	 */
+	struct rte_event_eth_rx_adapter_stats *stats;
 };
 
 static struct event_eth_rx_adapter **event_eth_rx_adapter;
@@ -268,14 +272,18 @@ rxa_validate_id(uint8_t id)
 
 static inline struct eth_event_enqueue_buffer *
 rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
-		  uint16_t rx_queue_id)
+		  uint16_t rx_queue_id,
+		  struct rte_event_eth_rx_adapter_stats **stats)
 {
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_device_info *dev_info =
 			&rx_adapter->eth_devices[eth_dev_id];
+		*stats = dev_info->rx_queue[rx_queue_id].stats;
 		return dev_info->rx_queue[rx_queue_id].event_buf;
-	} else
+	} else {
+		*stats = &rx_adapter->stats;
 		return &rx_adapter->event_enqueue_buffer;
+	}
 }
 
 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -766,9 +774,9 @@ rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
 /* Enqueue buffered events to event device */
 static inline uint16_t
 rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
-		       struct eth_event_enqueue_buffer *buf)
+		       struct eth_event_enqueue_buffer *buf,
+		       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
 
 	if (!count)
@@ -883,7 +891,8 @@ rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 static inline void
 rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
-		 struct eth_event_enqueue_buffer *buf)
+		 struct eth_event_enqueue_buffer *buf,
+		 struct rte_event_eth_rx_adapter_stats *stats)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
@@ -954,7 +963,7 @@ rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 		else
 			num = nb_cb;
 		if (dropped)
-			rx_adapter->stats.rx_dropped += dropped;
+			stats->rx_dropped += dropped;
 	}
 
 	buf->count += num;
@@ -985,11 +994,10 @@ rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
 static inline uint32_t
 rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
-	   int *rxq_empty, struct eth_event_enqueue_buffer *buf)
+	   int *rxq_empty, struct eth_event_enqueue_buffer *buf,
+	   struct rte_event_eth_rx_adapter_stats *stats)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_event_eth_rx_adapter_stats *stats =
-					&rx_adapter->stats;
 	uint16_t n;
 	uint32_t nb_rx = 0;
 
@@ -1000,7 +1008,7 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	 */
 	while (rxa_pkt_buf_available(buf)) {
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 		stats->rx_poll_count++;
 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
@@ -1009,14 +1017,17 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 				*rxq_empty = 1;
 			break;
 		}
-		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
+		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
+				 stats);
 		nb_rx += n;
 		if (rx_count + nb_rx > max_rx)
 			break;
 	}
 
 	if (buf->count > 0)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
+
+	stats->rx_packets += nb_rx;
 
 	return nb_rx;
 }
@@ -1135,28 +1146,30 @@ rxa_intr_thread(void *arg)
 /* Dequeue <port, q> from interrupt ring and enqueue received
  * mbufs to eventdev
  */
-static inline uint32_t
+static inline void
 rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
 	struct eth_event_enqueue_buffer *buf;
+	struct rte_event_eth_rx_adapter_stats *stats;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
 	if (rx_adapter->num_rx_intr == 0)
-		return 0;
+		return;
 
 	if (rte_ring_count(rx_adapter->intr_ring) == 0
 		&& !rx_adapter->qd_valid)
-		return 0;
+		return;
 
 	buf = &rx_adapter->event_enqueue_buffer;
+	stats = &rx_adapter->stats;
 	ring_lock = &rx_adapter->intr_ring_lock;
 
 	if (buf->count >= BATCH_SIZE)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	while (rxa_pkt_buf_available(buf)) {
 		struct eth_device_info *dev_info;
@@ -1208,7 +1221,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 					continue;
 				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
 					rx_adapter->max_nb_rx,
-					&rxq_empty, buf);
+					&rxq_empty, buf, stats);
 				nb_rx += n;
 
 				enq_buffer_full = !rxq_empty && n == 0;
@@ -1229,7 +1242,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 		} else {
 			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
 				rx_adapter->max_nb_rx,
-				&rxq_empty, buf);
+				&rxq_empty, buf, stats);
 			rx_adapter->qd_valid = !rxq_empty;
 			nb_rx += n;
 			if (nb_rx > rx_adapter->max_nb_rx)
@@ -1239,7 +1252,6 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 
 done:
 	rx_adapter->stats.rx_intr_packets += nb_rx;
-	return nb_rx;
 }
 
 /*
@@ -1255,12 +1267,13 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
  * the hypervisor's switching layer where adjustments can be made to deal with
  * it.
  */
-static inline uint32_t
+static inline void
 rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1273,24 +1286,24 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
 		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
 
-		buf = rxa_event_buf_get(rx_adapter, d, qid);
+		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
 
 		/* Don't do a batch dequeue from the rx queue if there isn't
 		 * enough space in the enqueue buffer.
 		 */
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf);
+			rxa_flush_event_buffer(rx_adapter, buf, stats);
 		if (!rxa_pkt_buf_available(buf)) {
 			if (rx_adapter->use_queue_event_buf)
 				goto poll_next_entry;
 			else {
 				rx_adapter->wrr_pos = wrr_pos;
-				return nb_rx;
+				return;
 			}
 		}
 
 		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
-				NULL, buf);
+				NULL, buf, stats);
 		if (nb_rx > max_nb_rx) {
 			rx_adapter->wrr_pos =
 				    (wrr_pos + 1) % rx_adapter->wrr_len;
@@ -1301,7 +1314,6 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 		if (++wrr_pos == rx_adapter->wrr_len)
 			wrr_pos = 0;
 	}
-	return nb_rx;
 }
 
 static void
@@ -1309,12 +1321,13 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
 	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct eth_event_enqueue_buffer *buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	struct rte_event *ev;
 
-	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
+	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
 
 	if (buf->count)
-		rxa_flush_event_buffer(rx_adapter, buf);
+		rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	if (vec->vector_ev->nb_elem == 0)
 		return;
@@ -1333,7 +1346,6 @@ static int
 rxa_service_func(void *args)
 {
 	struct event_eth_rx_adapter *rx_adapter = args;
-	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
 		return 0;
@@ -1360,10 +1372,11 @@ rxa_service_func(void *args)
 		}
 	}
 
-	stats = &rx_adapter->stats;
-	stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
-	stats->rx_packets += rxa_poll(rx_adapter);
+	rxa_intr_ring_dequeue(rx_adapter);
+	rxa_poll(rx_adapter);
+
 	rte_spinlock_unlock(&rx_adapter->rx_lock);
+
 	return 0;
 }
 
@@ -1937,9 +1950,13 @@ rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
 	if (rx_adapter->use_queue_event_buf) {
 		struct eth_event_enqueue_buffer *event_buf =
 			dev_info->rx_queue[rx_queue_id].event_buf;
+		struct rte_event_eth_rx_adapter_stats *stats =
+			dev_info->rx_queue[rx_queue_id].stats;
 		rte_free(event_buf->events);
 		rte_free(event_buf);
+		rte_free(stats);
 		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
+		dev_info->rx_queue[rx_queue_id].stats = NULL;
 	}
 }
 
@@ -1955,6 +1972,7 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 	int sintrq;
 	struct rte_event *qi_ev;
 	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
+	struct rte_event_eth_rx_adapter_stats *stats = NULL;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
 	int ret;
 
@@ -2061,6 +2079,21 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
 
 	queue_info->event_buf = new_rx_buf;
 
+	/* Allocate storage for adapter queue stats */
+	stats = rte_zmalloc_socket("rx_queue_stats",
+				sizeof(*stats), 0,
+				rte_eth_dev_socket_id(eth_dev_id));
+	if (stats == NULL) {
+		rte_free(new_rx_buf->events);
+		rte_free(new_rx_buf);
+		RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
+				 " dev_id: %d queue_id: %d",
+				 eth_dev_id, rx_queue_id);
+		return -ENOMEM;
+	}
+
+	queue_info->stats = stats;
+
 	return 0;
 }
 
@@ -2819,6 +2852,15 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
 	return rxa_ctrl(id, 0);
 }
 
+static inline void
+rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
+{
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+
+	q_stats = queue_info->stats;
+	memset(q_stats, 0, sizeof(*q_stats));
+}
+
 int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
@@ -2829,7 +2871,9 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+	uint32_t i, j;
 	int ret;
 
 	if (rxa_memzone_lookup())
@@ -2843,8 +2887,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	memset(stats, 0, sizeof(*stats));
+
+	if (rx_adapter->service_inited)
+		*stats = rx_adapter->stats;
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+			     j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				q_stats = queue_info->stats;
+
+				stats->rx_packets += q_stats->rx_packets;
+				stats->rx_poll_count += q_stats->rx_poll_count;
+				stats->rx_enq_count += q_stats->rx_enq_count;
+				stats->rx_enq_retry += q_stats->rx_enq_retry;
+				stats->rx_dropped += q_stats->rx_dropped;
+				stats->rx_enq_block_cycles +=
+						q_stats->rx_enq_block_cycles;
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
 			continue;
@@ -2857,19 +2925,69 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
 	}
 
-	if (rx_adapter->service_inited)
-		*stats = rx_adapter->stats;
-
+	buf = &rx_adapter->event_enqueue_buffer;
 	stats->rx_packets += dev_stats_sum.rx_packets;
 	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+	stats->rx_event_buf_count = buf->count;
+	stats->rx_event_buf_size = buf->events_size;
 
-	if (!rx_adapter->use_queue_event_buf) {
-		buf = &rx_adapter->event_enqueue_buffer;
-		stats->rx_event_buf_count = buf->count;
-		stats->rx_event_buf_size = buf->events_size;
-	} else {
-		stats->rx_event_buf_count = 0;
-		stats->rx_event_buf_size = 0;
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+	struct eth_event_enqueue_buffer *event_buf;
+	struct rte_event_eth_rx_adapter_stats *q_stats;
+	struct rte_eventdev *dev;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+
+	if (rx_adapter == NULL || stats == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
+	}
+
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	event_buf = queue_info->event_buf;
+	q_stats = queue_info->stats;
+
+	stats->rx_event_buf_count = event_buf->count;
+	stats->rx_event_buf_size = event_buf->events_size;
+	stats->rx_packets = q_stats->rx_packets;
+	stats->rx_poll_count = q_stats->rx_poll_count;
+	stats->rx_dropped = q_stats->rx_dropped;
+
+	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+	if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
+		return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
+						&rte_eth_devices[eth_dev_id],
+						rx_queue_id, stats);
 	}
 
 	return 0;
@@ -2881,7 +2999,8 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
-	uint32_t i;
+	struct eth_rx_queue_info *queue_info;
+	uint32_t i, j;
 
 	if (rxa_memzone_lookup())
 		return -ENOMEM;
@@ -2893,8 +3012,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 		return -EINVAL;
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
 	RTE_ETH_FOREACH_DEV(i) {
 		dev_info = &rx_adapter->eth_devices[i];
+
+		if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
+
+			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+						j++) {
+				queue_info = &dev_info->rx_queue[j];
+				if (!queue_info->queue_enabled)
+					continue;
+				rxa_queue_stats_reset(queue_info);
+			}
+		}
+
 		if (dev_info->internal_event_port == 0 ||
 			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
 			continue;
@@ -2903,6 +3035,56 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 	}
 
 	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+
+	return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id)
+{
+	struct event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	struct eth_rx_queue_info *queue_info;
+	struct rte_eventdev *dev;
+
+	if (rxa_memzone_lookup())
+		return -ENOMEM;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+	if (rx_adapter == NULL)
+		return -EINVAL;
+
+	if (!rx_adapter->use_queue_event_buf)
+		return -EINVAL;
+
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+		return -EINVAL;
+	}
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+	if (dev_info->rx_queue == NULL ||
+	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+		return -EINVAL;
+	}
+
+	queue_info = &dev_info->rx_queue[rx_queue_id];
+	rxa_queue_stats_reset(queue_info);
+
+	dev = &rte_eventdevs[rx_adapter->eventdev_id];
+	if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
+		return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
+						&rte_eth_devices[eth_dev_id],
+						rx_queue_id);
+	}
+
 	return 0;
 }
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index ab625f7273..9546d792e9 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -35,6 +35,8 @@
  *  - rte_event_eth_rx_adapter_stats_get()
  *  - rte_event_eth_rx_adapter_stats_reset()
  *  - rte_event_eth_rx_adapter_queue_conf_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_get()
+ *  - rte_event_eth_rx_adapter_queue_stats_reset()
  *
  * The application creates an ethernet to event adapter using
  * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
@@ -204,6 +206,23 @@ struct rte_event_eth_rx_adapter_queue_conf {
 	/**< event buffer size for this queue */
 };
 
+/**
+ * A structure used to retrieve statistics for an
+ * eth rx adapter queue.
+ */
+struct rte_event_eth_rx_adapter_queue_stats {
+	uint64_t rx_event_buf_count;
+	/**< Rx event buffered count */
+	uint64_t rx_event_buf_size;
+	/**< Rx event buffer size */
+	uint64_t rx_poll_count;
+	/**< Receive queue poll count */
+	uint64_t rx_packets;
+	/**< Received packet count */
+	uint64_t rx_dropped;
+	/**< Received packet dropped count */
+};
+
 /**
  * A structure used to retrieve statistics for an eth rx adapter instance.
  */
@@ -617,6 +636,53 @@ int rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
 			uint16_t rx_queue_id,
 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
 
+/**
+ * Retrieve Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @param[out] stats
+ *  Pointer to struct rte_event_eth_rx_adapter_queue_stats
+ *
+ * @return
+ *  - 0: Success, queue stats retrieved.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id,
+		struct rte_event_eth_rx_adapter_queue_stats *stats);
+
+/**
+ * Reset Rx queue statistics.
+ *
+ * @param id
+ *  Adapter identifier.
+ *
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ *  Ethernet device receive queue index.
+ *
+ * @return
+ *  - 0: Success, queue stats reset.
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+		uint16_t eth_dev_id,
+		uint16_t rx_queue_id);
 
 #ifdef __cplusplus
 }
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index cd37164141..ade1f1182e 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -103,6 +103,8 @@ EXPERIMENTAL {
 	# added in 21.11
 	rte_event_eth_rx_adapter_create_with_params;
 	rte_event_eth_rx_adapter_queue_conf_get;
+	rte_event_eth_rx_adapter_queue_stats_get;
+	rte_event_eth_rx_adapter_queue_stats_reset;
 };
 
 INTERNAL {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [dpdk-dev] [PATCH v3 2/3] eventdev/eth_rx: support telemetry
  2021-10-28 10:37   ` Naga Harish K S V
@ 2021-10-28 10:37     ` Naga Harish K S V
  2021-10-28 10:37     ` [dpdk-dev] [PATCH v3 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
  1 sibling, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28 10:37 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Add telemetry commands rxa_queue_stats and rxa_queue_stats_reset
to get and reset Rx queue stats, respectively.
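
For reference, a rough interactive sketch of the new commands through the
standard telemetry client (adapter id 0, ethdev port 0 and Rx queue 0 are
placeholder values for a queue that was added with a per-queue event buffer):

    $ ./usertools/dpdk-telemetry.py
    --> /eventdev/rxa_queue_stats,0,0,0
        (returns a dict with rx_adapter_id, eth_dev_id, rx_queue_id,
         rx_event_buf_count, rx_event_buf_size, rx_poll_count,
         rx_packets and rx_dropped)
    --> /eventdev/rxa_queue_stats_reset,0,0,0
        (clears the counters for that queue)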

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 lib/eventdev/rte_event_eth_rx_adapter.c | 124 ++++++++++++++++++++++++
 1 file changed, 124 insertions(+)

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 3adec52eac..4e0829c492 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -3345,6 +3345,122 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
 	return 0;
 }
 
+static int
+handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+			   const char *params,
+			   struct rte_tel_data *d)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, ignoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
+						    rx_queue_id, &q_stats)) {
+		RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
+		return -1;
+	}
+
+	rte_tel_data_start_dict(d);
+	rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+	rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
+	rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
+	RXA_ADD_DICT(q_stats, rx_event_buf_count);
+	RXA_ADD_DICT(q_stats, rx_event_buf_size);
+	RXA_ADD_DICT(q_stats, rx_poll_count);
+	RXA_ADD_DICT(q_stats, rx_packets);
+	RXA_ADD_DICT(q_stats, rx_dropped);
+
+	return 0;
+}
+
+static int
+handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+			     const char *params,
+			     struct rte_tel_data *d __rte_unused)
+{
+	uint8_t rx_adapter_id;
+	uint16_t rx_queue_id;
+	int eth_dev_id;
+	char *token, *l_params;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	/* Get Rx adapter ID from parameter string */
+	l_params = strdup(params);
+	token = strtok(l_params, ",");
+	rx_adapter_id = strtoul(token, NULL, 10);
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get device ID from parameter string */
+	eth_dev_id = strtoul(token, NULL, 10);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	token = strtok(NULL, ",");
+	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+		return -1;
+
+	/* Get Rx queue ID from parameter string */
+	rx_queue_id = strtoul(token, NULL, 10);
+	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+		return -EINVAL;
+	}
+
+	token = strtok(NULL, "\0");
+	if (token != NULL)
+		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+				 " telemetry command, ignoring");
+
+	if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
+						       eth_dev_id,
+						       rx_queue_id)) {
+		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
+		return -1;
+	}
+
+	return 0;
+}
+
 RTE_INIT(rxa_init_telemetry)
 {
 	rte_telemetry_register_cmd("/eventdev/rxa_stats",
@@ -3358,4 +3474,12 @@ RTE_INIT(rxa_init_telemetry)
 	rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
 		handle_rxa_get_queue_conf,
 		"Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
+		handle_rxa_get_queue_stats,
+		"Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
+
+	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
+		handle_rxa_queue_stats_reset,
+		"Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [dpdk-dev] [PATCH v3 3/3] test/event: add unit test for Rx adapter
  2021-10-28 10:37   ` Naga Harish K S V
  2021-10-28 10:37     ` [dpdk-dev] [PATCH v3 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
@ 2021-10-28 10:37     ` Naga Harish K S V
  1 sibling, 0 replies; 17+ messages in thread
From: Naga Harish K S V @ 2021-10-28 10:37 UTC (permalink / raw)
  To: jerinj, jay.jayatheerthan; +Cc: dev

Add unit tests for the rte_event_eth_rx_adapter_queue_stats_get() and
rte_event_eth_rx_adapter_queue_stats_reset() APIs.
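
As a quick illustration of what the test exercises at the application level,
a minimal sketch is shown below (the adapter, port and queue ids are
placeholder values, error handling is trimmed, and the calls are only valid
for a queue added with a per-queue event buffer):

#include <stdio.h>
#include <inttypes.h>
#include <rte_event_eth_rx_adapter.h>

static void
dump_and_clear_queue_stats(uint8_t rxa_id, uint16_t port_id,
			   uint16_t queue_id)
{
	struct rte_event_eth_rx_adapter_queue_stats q_stats;

	/* Returns -EINVAL unless the queue uses its own event buffer */
	if (rte_event_eth_rx_adapter_queue_stats_get(rxa_id, port_id,
						     queue_id, &q_stats) == 0) {
		printf("q%u: packets=%" PRIu64 " dropped=%" PRIu64 "\n",
		       queue_id, q_stats.rx_packets, q_stats.rx_dropped);
		/* Start a fresh measurement interval */
		rte_event_eth_rx_adapter_queue_stats_reset(rxa_id, port_id,
							   queue_id);
	}
}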

Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 app/test/test_event_eth_rx_adapter.c | 60 ++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index 1419f6f64d..7cb91b152f 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -471,6 +471,64 @@ adapter_queue_event_buf_test(void)
 	return TEST_SUCCESS;
 }
 
+static int
+adapter_queue_stats_test(void)
+{
+	int err;
+	struct rte_event ev;
+	uint32_t cap;
+	struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
+	struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+					 &cap);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	ev.queue_id = 0;
+	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+	ev.priority = 0;
+
+	queue_config.rx_queue_flags = 0;
+	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+		ev.flow_id = 1;
+		queue_config.rx_queue_flags =
+			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+	}
+	queue_config.ev = ev;
+	queue_config.servicing_weight = 1;
+	queue_config.event_buf_size = 1024;
+
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+					TEST_ETHDEV_ID, 0,
+					&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&q_stats);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	return TEST_SUCCESS;
+}
+
 static void
 adapter_free(void)
 {
@@ -940,6 +998,8 @@ static struct unit_test_suite event_eth_rx_tests = {
 		TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf),
 		TEST_CASE_ST(adapter_create_with_params, adapter_free,
 			     adapter_queue_event_buf_test),
+		TEST_CASE_ST(adapter_create_with_params, adapter_free,
+			     adapter_queue_stats_test),
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [dpdk-dev] [PATCH v3 1/3] eventdev/eth_rx: add queue stats get and reset APIs
  2021-10-28 10:27   ` [dpdk-dev] [PATCH v3 " Naga Harish K S V
  2021-10-28 10:27     ` [dpdk-dev] [PATCH v3 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
  2021-10-28 10:27     ` [dpdk-dev] [PATCH v3 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
@ 2021-11-02 11:00     ` Jerin Jacob
  2 siblings, 0 replies; 17+ messages in thread
From: Jerin Jacob @ 2021-11-02 11:00 UTC (permalink / raw)
  To: Naga Harish K S V; +Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev

On Thu, Oct 28, 2021 at 4:22 PM Naga Harish K S V
<s.v.naga.harish.k@intel.com> wrote:
>
> This patch adds new api ``rte_event_eth_rx_adapter_queue_stats_get`` to
> retrieve queue stats. The queue stats are in the format
> ``struct rte_event_eth_rx_adapter_queue_stats``.
>
> For resetting the queue stats,
> ``rte_event_eth_rx_adapter_queue_stats_reset`` api is added.
>
> The adapter stats_get and stats_reset apis are also updated to
> handle queue level event buffer use case.
>
> Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>


Series applied to dpdk-next-net-eventdev/for-main. Thanks


> ---
> v2:
> * added pmd callback support for adapter queue_stats_get and
>   queue_stats_reset apis.
>
> v3:
> * addressed coding style review comments
> ---
>  .../prog_guide/event_ethernet_rx_adapter.rst  |  11 +
>  lib/eventdev/eventdev_pmd.h                   |  52 ++++
>  lib/eventdev/rte_event_eth_rx_adapter.c       | 268 +++++++++++++++---
>  lib/eventdev/rte_event_eth_rx_adapter.h       |  66 +++++
>  lib/eventdev/version.map                      |   2 +
>  5 files changed, 356 insertions(+), 43 deletions(-)
>
> diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
> index 8b58130fc5..67b11e1563 100644
> --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
> +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
> @@ -166,6 +166,17 @@ flags for handling received packets, event queue identifier, scheduler type,
>  event priority, polling frequency of the receive queue and flow identifier
>  in struct ``rte_event_eth_rx_adapter_queue_conf``.
>
> +Getting and resetting Adapter queue stats
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +The ``rte_event_eth_rx_adapter_queue_stats_get()`` function reports
> +adapter queue counters defined in struct ``rte_event_eth_rx_adapter_queue_stats``.
> +This function reports queue level stats only when queue level event buffer is
> +used otherwise it returns -EINVAL.
> +
> +The ``rte_event_eth_rx_adapter_queue_stats_reset`` function can be used to
> +reset queue level stats when queue level event buffer is in use.
> +
>  Interrupt Based Rx Queues
>  ~~~~~~~~~~~~~~~~~~~~~~~~~~
>
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> index d009e24309..3ba49d1fd4 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -749,6 +749,53 @@ typedef int (*eventdev_eth_rx_adapter_stats_get)
>  typedef int (*eventdev_eth_rx_adapter_stats_reset)
>                         (const struct rte_eventdev *dev,
>                         const struct rte_eth_dev *eth_dev);
> +
> +struct rte_event_eth_rx_adapter_queue_stats;
> +
> +/**
> + * Retrieve ethernet Rx adapter queue statistics.
> + *
> + * @param dev
> + *   Event device pointer
> + *
> + * @param eth_dev
> + *   Ethernet device pointer
> + *
> + * @param rx_queue_id
> + *  Ethernet device receive queue index.
> + *
> + * @param[out] q_stats
> + *   Pointer to queue stats structure
> + *
> + * @return
> + *   Return 0 on success.
> + */
> +typedef int (*eventdev_eth_rx_adapter_q_stats_get)
> +                       (const struct rte_eventdev *dev,
> +                        const struct rte_eth_dev *eth_dev,
> +                        uint16_t rx_queue_id,
> +                        struct rte_event_eth_rx_adapter_queue_stats *q_stats);
> +
> +/**
> + * Reset ethernet Rx adapter queue statistics.
> + *
> + * @param dev
> + *   Event device pointer
> + *
> + * @param eth_dev
> + *   Ethernet device pointer
> + *
> + * @param rx_queue_id
> + *  Ethernet device receive queue index.
> + *
> + * @return
> + *   Return 0 on success.
> + */
> +typedef int (*eventdev_eth_rx_adapter_q_stats_reset)
> +                       (const struct rte_eventdev *dev,
> +                        const struct rte_eth_dev *eth_dev,
> +                        uint16_t rx_queue_id);
> +
>  /**
>   * Start eventdev selftest.
>   *
> @@ -1224,6 +1271,11 @@ struct eventdev_ops {
>         eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
>         /**< Reset crypto stats */
>
> +       eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
> +       /**< Get ethernet Rx queue stats */
> +       eventdev_eth_rx_adapter_q_stats_reset eth_rx_adapter_queue_stats_reset;
> +       /**< Reset ethernet Rx queue stats */
> +
>         eventdev_eth_tx_adapter_caps_get_t eth_tx_adapter_caps_get;
>         /**< Get ethernet Tx adapter capabilities */
>
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
> index a175c61551..3adec52eac 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -245,6 +245,10 @@ struct eth_rx_queue_info {
>         uint64_t event;
>         struct eth_rx_vector_data vector_data;
>         struct eth_event_enqueue_buffer *event_buf;
> +       /* use adapter stats struct for queue level stats,
> +        * as same stats need to be updated for adapter and queue
> +        */
> +       struct rte_event_eth_rx_adapter_stats *stats;
>  };
>
>  static struct event_eth_rx_adapter **event_eth_rx_adapter;
> @@ -268,14 +272,18 @@ rxa_validate_id(uint8_t id)
>
>  static inline struct eth_event_enqueue_buffer *
>  rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> -                 uint16_t rx_queue_id)
> +                 uint16_t rx_queue_id,
> +                 struct rte_event_eth_rx_adapter_stats **stats)
>  {
>         if (rx_adapter->use_queue_event_buf) {
>                 struct eth_device_info *dev_info =
>                         &rx_adapter->eth_devices[eth_dev_id];
> +               *stats = dev_info->rx_queue[rx_queue_id].stats;
>                 return dev_info->rx_queue[rx_queue_id].event_buf;
> -       } else
> +       } else {
> +               *stats = &rx_adapter->stats;
>                 return &rx_adapter->event_enqueue_buffer;
> +       }
>  }
>
>  #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
> @@ -766,9 +774,9 @@ rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
>  /* Enqueue buffered events to event device */
>  static inline uint16_t
>  rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
> -                      struct eth_event_enqueue_buffer *buf)
> +                      struct eth_event_enqueue_buffer *buf,
> +                      struct rte_event_eth_rx_adapter_stats *stats)
>  {
> -       struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
>         uint16_t count = buf->last ? buf->last - buf->head : buf->count;
>
>         if (!count)
> @@ -883,7 +891,8 @@ rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
>  static inline void
>  rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
>                  uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
> -                struct eth_event_enqueue_buffer *buf)
> +                struct eth_event_enqueue_buffer *buf,
> +                struct rte_event_eth_rx_adapter_stats *stats)
>  {
>         uint32_t i;
>         struct eth_device_info *dev_info =
> @@ -954,7 +963,7 @@ rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
>                 else
>                         num = nb_cb;
>                 if (dropped)
> -                       rx_adapter->stats.rx_dropped += dropped;
> +                       stats->rx_dropped += dropped;
>         }
>
>         buf->count += num;
> @@ -985,11 +994,10 @@ rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
>  static inline uint32_t
>  rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
>            uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
> -          int *rxq_empty, struct eth_event_enqueue_buffer *buf)
> +          int *rxq_empty, struct eth_event_enqueue_buffer *buf,
> +          struct rte_event_eth_rx_adapter_stats *stats)
>  {
>         struct rte_mbuf *mbufs[BATCH_SIZE];
> -       struct rte_event_eth_rx_adapter_stats *stats =
> -                                       &rx_adapter->stats;
>         uint16_t n;
>         uint32_t nb_rx = 0;
>
> @@ -1000,7 +1008,7 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
>          */
>         while (rxa_pkt_buf_available(buf)) {
>                 if (buf->count >= BATCH_SIZE)
> -                       rxa_flush_event_buffer(rx_adapter, buf);
> +                       rxa_flush_event_buffer(rx_adapter, buf, stats);
>
>                 stats->rx_poll_count++;
>                 n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
> @@ -1009,14 +1017,17 @@ rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
>                                 *rxq_empty = 1;
>                         break;
>                 }
> -               rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
> +               rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
> +                                stats);
>                 nb_rx += n;
>                 if (rx_count + nb_rx > max_rx)
>                         break;
>         }
>
>         if (buf->count > 0)
> -               rxa_flush_event_buffer(rx_adapter, buf);
> +               rxa_flush_event_buffer(rx_adapter, buf, stats);
> +
> +       stats->rx_packets += nb_rx;
>
>         return nb_rx;
>  }
> @@ -1135,28 +1146,30 @@ rxa_intr_thread(void *arg)
>  /* Dequeue <port, q> from interrupt ring and enqueue received
>   * mbufs to eventdev
>   */
> -static inline uint32_t
> +static inline void
>  rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>  {
>         uint32_t n;
>         uint32_t nb_rx = 0;
>         int rxq_empty;
>         struct eth_event_enqueue_buffer *buf;
> +       struct rte_event_eth_rx_adapter_stats *stats;
>         rte_spinlock_t *ring_lock;
>         uint8_t max_done = 0;
>
>         if (rx_adapter->num_rx_intr == 0)
> -               return 0;
> +               return;
>
>         if (rte_ring_count(rx_adapter->intr_ring) == 0
>                 && !rx_adapter->qd_valid)
> -               return 0;
> +               return;
>
>         buf = &rx_adapter->event_enqueue_buffer;
> +       stats = &rx_adapter->stats;
>         ring_lock = &rx_adapter->intr_ring_lock;
>
>         if (buf->count >= BATCH_SIZE)
> -               rxa_flush_event_buffer(rx_adapter, buf);
> +               rxa_flush_event_buffer(rx_adapter, buf, stats);
>
>         while (rxa_pkt_buf_available(buf)) {
>                 struct eth_device_info *dev_info;
> @@ -1208,7 +1221,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>                                         continue;
>                                 n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
>                                         rx_adapter->max_nb_rx,
> -                                       &rxq_empty, buf);
> +                                       &rxq_empty, buf, stats);
>                                 nb_rx += n;
>
>                                 enq_buffer_full = !rxq_empty && n == 0;
> @@ -1229,7 +1242,7 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>                 } else {
>                         n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
>                                 rx_adapter->max_nb_rx,
> -                               &rxq_empty, buf);
> +                               &rxq_empty, buf, stats);
>                         rx_adapter->qd_valid = !rxq_empty;
>                         nb_rx += n;
>                         if (nb_rx > rx_adapter->max_nb_rx)
> @@ -1239,7 +1252,6 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>
>  done:
>         rx_adapter->stats.rx_intr_packets += nb_rx;
> -       return nb_rx;
>  }
>
>  /*
> @@ -1255,12 +1267,13 @@ rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>   * the hypervisor's switching layer where adjustments can be made to deal with
>   * it.
>   */
> -static inline uint32_t
> +static inline void
>  rxa_poll(struct event_eth_rx_adapter *rx_adapter)
>  {
>         uint32_t num_queue;
>         uint32_t nb_rx = 0;
>         struct eth_event_enqueue_buffer *buf = NULL;
> +       struct rte_event_eth_rx_adapter_stats *stats = NULL;
>         uint32_t wrr_pos;
>         uint32_t max_nb_rx;
>
> @@ -1273,24 +1286,24 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
>                 uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
>                 uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
>
> -               buf = rxa_event_buf_get(rx_adapter, d, qid);
> +               buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
>
>                 /* Don't do a batch dequeue from the rx queue if there isn't
>                  * enough space in the enqueue buffer.
>                  */
>                 if (buf->count >= BATCH_SIZE)
> -                       rxa_flush_event_buffer(rx_adapter, buf);
> +                       rxa_flush_event_buffer(rx_adapter, buf, stats);
>                 if (!rxa_pkt_buf_available(buf)) {
>                         if (rx_adapter->use_queue_event_buf)
>                                 goto poll_next_entry;
>                         else {
>                                 rx_adapter->wrr_pos = wrr_pos;
> -                               return nb_rx;
> +                               return;
>                         }
>                 }
>
>                 nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
> -                               NULL, buf);
> +                               NULL, buf, stats);
>                 if (nb_rx > max_nb_rx) {
>                         rx_adapter->wrr_pos =
>                                     (wrr_pos + 1) % rx_adapter->wrr_len;
> @@ -1301,7 +1314,6 @@ rxa_poll(struct event_eth_rx_adapter *rx_adapter)
>                 if (++wrr_pos == rx_adapter->wrr_len)
>                         wrr_pos = 0;
>         }
> -       return nb_rx;
>  }
>
>  static void
> @@ -1309,12 +1321,13 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
>  {
>         struct event_eth_rx_adapter *rx_adapter = arg;
>         struct eth_event_enqueue_buffer *buf = NULL;
> +       struct rte_event_eth_rx_adapter_stats *stats = NULL;
>         struct rte_event *ev;
>
> -       buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
> +       buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
>
>         if (buf->count)
> -               rxa_flush_event_buffer(rx_adapter, buf);
> +               rxa_flush_event_buffer(rx_adapter, buf, stats);
>
>         if (vec->vector_ev->nb_elem == 0)
>                 return;
> @@ -1333,7 +1346,6 @@ static int
>  rxa_service_func(void *args)
>  {
>         struct event_eth_rx_adapter *rx_adapter = args;
> -       struct rte_event_eth_rx_adapter_stats *stats;
>
>         if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
>                 return 0;
> @@ -1360,10 +1372,11 @@ rxa_service_func(void *args)
>                 }
>         }
>
> -       stats = &rx_adapter->stats;
> -       stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
> -       stats->rx_packets += rxa_poll(rx_adapter);
> +       rxa_intr_ring_dequeue(rx_adapter);
> +       rxa_poll(rx_adapter);
> +
>         rte_spinlock_unlock(&rx_adapter->rx_lock);
> +
>         return 0;
>  }
>
> @@ -1937,9 +1950,13 @@ rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
>         if (rx_adapter->use_queue_event_buf) {
>                 struct eth_event_enqueue_buffer *event_buf =
>                         dev_info->rx_queue[rx_queue_id].event_buf;
> +               struct rte_event_eth_rx_adapter_stats *stats =
> +                       dev_info->rx_queue[rx_queue_id].stats;
>                 rte_free(event_buf->events);
>                 rte_free(event_buf);
> +               rte_free(stats);
>                 dev_info->rx_queue[rx_queue_id].event_buf = NULL;
> +               dev_info->rx_queue[rx_queue_id].stats = NULL;
>         }
>  }
>
> @@ -1955,6 +1972,7 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
>         int sintrq;
>         struct rte_event *qi_ev;
>         struct eth_event_enqueue_buffer *new_rx_buf = NULL;
> +       struct rte_event_eth_rx_adapter_stats *stats = NULL;
>         uint16_t eth_dev_id = dev_info->dev->data->port_id;
>         int ret;
>
> @@ -2061,6 +2079,21 @@ rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
>
>         queue_info->event_buf = new_rx_buf;
>
> +       /* Allocate storage for adapter queue stats */
> +       stats = rte_zmalloc_socket("rx_queue_stats",
> +                               sizeof(*stats), 0,
> +                               rte_eth_dev_socket_id(eth_dev_id));
> +       if (stats == NULL) {
> +               rte_free(new_rx_buf->events);
> +               rte_free(new_rx_buf);
> +               RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
> +                                " dev_id: %d queue_id: %d",
> +                                eth_dev_id, rx_queue_id);
> +               return -ENOMEM;
> +       }
> +
> +       queue_info->stats = stats;
> +
>         return 0;
>  }
>
> @@ -2819,6 +2852,15 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
>         return rxa_ctrl(id, 0);
>  }
>
> +static inline void
> +rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
> +{
> +       struct rte_event_eth_rx_adapter_stats *q_stats;
> +
> +       q_stats = queue_info->stats;
> +       memset(q_stats, 0, sizeof(*q_stats));
> +}
> +
>  int
>  rte_event_eth_rx_adapter_stats_get(uint8_t id,
>                                struct rte_event_eth_rx_adapter_stats *stats)
> @@ -2829,7 +2871,9 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
>         struct rte_event_eth_rx_adapter_stats dev_stats;
>         struct rte_eventdev *dev;
>         struct eth_device_info *dev_info;
> -       uint32_t i;
> +       struct eth_rx_queue_info *queue_info;
> +       struct rte_event_eth_rx_adapter_stats *q_stats;
> +       uint32_t i, j;
>         int ret;
>
>         if (rxa_memzone_lookup())
> @@ -2843,8 +2887,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
>
>         dev = &rte_eventdevs[rx_adapter->eventdev_id];
>         memset(stats, 0, sizeof(*stats));
> +
> +       if (rx_adapter->service_inited)
> +               *stats = rx_adapter->stats;
> +
>         RTE_ETH_FOREACH_DEV(i) {
>                 dev_info = &rx_adapter->eth_devices[i];
> +
> +               if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
> +
> +                       for (j = 0; j < dev_info->dev->data->nb_rx_queues;
> +                            j++) {
> +                               queue_info = &dev_info->rx_queue[j];
> +                               if (!queue_info->queue_enabled)
> +                                       continue;
> +                               q_stats = queue_info->stats;
> +
> +                               stats->rx_packets += q_stats->rx_packets;
> +                               stats->rx_poll_count += q_stats->rx_poll_count;
> +                               stats->rx_enq_count += q_stats->rx_enq_count;
> +                               stats->rx_enq_retry += q_stats->rx_enq_retry;
> +                               stats->rx_dropped += q_stats->rx_dropped;
> +                               stats->rx_enq_block_cycles +=
> +                                               q_stats->rx_enq_block_cycles;
> +                       }
> +               }
> +
>                 if (dev_info->internal_event_port == 0 ||
>                         dev->dev_ops->eth_rx_adapter_stats_get == NULL)
>                         continue;
> @@ -2857,19 +2925,69 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
>                 dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
>         }
>
> -       if (rx_adapter->service_inited)
> -               *stats = rx_adapter->stats;
> -
> +       buf = &rx_adapter->event_enqueue_buffer;
>         stats->rx_packets += dev_stats_sum.rx_packets;
>         stats->rx_enq_count += dev_stats_sum.rx_enq_count;
> +       stats->rx_event_buf_count = buf->count;
> +       stats->rx_event_buf_size = buf->events_size;
>
> -       if (!rx_adapter->use_queue_event_buf) {
> -               buf = &rx_adapter->event_enqueue_buffer;
> -               stats->rx_event_buf_count = buf->count;
> -               stats->rx_event_buf_size = buf->events_size;
> -       } else {
> -               stats->rx_event_buf_count = 0;
> -               stats->rx_event_buf_size = 0;
> +       return 0;
> +}
> +
> +int
> +rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
> +               uint16_t eth_dev_id,
> +               uint16_t rx_queue_id,
> +               struct rte_event_eth_rx_adapter_queue_stats *stats)
> +{
> +       struct event_eth_rx_adapter *rx_adapter;
> +       struct eth_device_info *dev_info;
> +       struct eth_rx_queue_info *queue_info;
> +       struct eth_event_enqueue_buffer *event_buf;
> +       struct rte_event_eth_rx_adapter_stats *q_stats;
> +       struct rte_eventdev *dev;
> +
> +       if (rxa_memzone_lookup())
> +               return -ENOMEM;
> +
> +       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> +       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> +
> +       rx_adapter = rxa_id_to_adapter(id);
> +
> +       if (rx_adapter == NULL || stats == NULL)
> +               return -EINVAL;
> +
> +       if (!rx_adapter->use_queue_event_buf)
> +               return -EINVAL;
> +
> +       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
> +               RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
> +               return -EINVAL;
> +       }
> +
> +       dev_info = &rx_adapter->eth_devices[eth_dev_id];
> +       if (dev_info->rx_queue == NULL ||
> +           !dev_info->rx_queue[rx_queue_id].queue_enabled) {
> +               RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
> +               return -EINVAL;
> +       }
> +
> +       queue_info = &dev_info->rx_queue[rx_queue_id];
> +       event_buf = queue_info->event_buf;
> +       q_stats = queue_info->stats;
> +
> +       stats->rx_event_buf_count = event_buf->count;
> +       stats->rx_event_buf_size = event_buf->events_size;
> +       stats->rx_packets = q_stats->rx_packets;
> +       stats->rx_poll_count = q_stats->rx_poll_count;
> +       stats->rx_dropped = q_stats->rx_dropped;
> +
> +       dev = &rte_eventdevs[rx_adapter->eventdev_id];
> +       if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
> +               return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
> +                                               &rte_eth_devices[eth_dev_id],
> +                                               rx_queue_id, stats);
>         }
>
>         return 0;
> @@ -2881,7 +2999,8 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>         struct event_eth_rx_adapter *rx_adapter;
>         struct rte_eventdev *dev;
>         struct eth_device_info *dev_info;
> -       uint32_t i;
> +       struct eth_rx_queue_info *queue_info;
> +       uint32_t i, j;
>
>         if (rxa_memzone_lookup())
>                 return -ENOMEM;
> @@ -2893,8 +3012,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>                 return -EINVAL;
>
>         dev = &rte_eventdevs[rx_adapter->eventdev_id];
> +
>         RTE_ETH_FOREACH_DEV(i) {
>                 dev_info = &rx_adapter->eth_devices[i];
> +
> +               if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
> +
> +                       for (j = 0; j < dev_info->dev->data->nb_rx_queues;
> +                                               j++) {
> +                               queue_info = &dev_info->rx_queue[j];
> +                               if (!queue_info->queue_enabled)
> +                                       continue;
> +                               rxa_queue_stats_reset(queue_info);
> +                       }
> +               }
> +
>                 if (dev_info->internal_event_port == 0 ||
>                         dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
>                         continue;
> @@ -2903,6 +3035,56 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>         }
>
>         memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
> +
> +       return 0;
> +}
> +
> +int
> +rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
> +               uint16_t eth_dev_id,
> +               uint16_t rx_queue_id)
> +{
> +       struct event_eth_rx_adapter *rx_adapter;
> +       struct eth_device_info *dev_info;
> +       struct eth_rx_queue_info *queue_info;
> +       struct rte_eventdev *dev;
> +
> +       if (rxa_memzone_lookup())
> +               return -ENOMEM;
> +
> +       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> +       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> +
> +       rx_adapter = rxa_id_to_adapter(id);
> +       if (rx_adapter == NULL)
> +               return -EINVAL;
> +
> +       if (!rx_adapter->use_queue_event_buf)
> +               return -EINVAL;
> +
> +       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
> +               RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
> +               return -EINVAL;
> +       }
> +
> +       dev_info = &rx_adapter->eth_devices[eth_dev_id];
> +
> +       if (dev_info->rx_queue == NULL ||
> +           !dev_info->rx_queue[rx_queue_id].queue_enabled) {
> +               RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
> +               return -EINVAL;
> +       }
> +
> +       queue_info = &dev_info->rx_queue[rx_queue_id];
> +       rxa_queue_stats_reset(queue_info);
> +
> +       dev = &rte_eventdevs[rx_adapter->eventdev_id];
> +       if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
> +               return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
> +                                               &rte_eth_devices[eth_dev_id],
> +                                               rx_queue_id);
> +       }
> +
>         return 0;
>  }
>
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
> index ab625f7273..9546d792e9 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> @@ -35,6 +35,8 @@
>   *  - rte_event_eth_rx_adapter_stats_get()
>   *  - rte_event_eth_rx_adapter_stats_reset()
>   *  - rte_event_eth_rx_adapter_queue_conf_get()
> + *  - rte_event_eth_rx_adapter_queue_stats_get()
> + *  - rte_event_eth_rx_adapter_queue_stats_reset()
>   *
>   * The application creates an ethernet to event adapter using
>   * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
> @@ -204,6 +206,23 @@ struct rte_event_eth_rx_adapter_queue_conf {
>         /**< event buffer size for this queue */
>  };
>
> +/**
> + * A structure used to retrieve statistics for an
> + * eth rx adapter queue.
> + */
> +struct rte_event_eth_rx_adapter_queue_stats {
> +       uint64_t rx_event_buf_count;
> +       /**< Rx event buffered count */
> +       uint64_t rx_event_buf_size;
> +       /**< Rx event buffer size */
> +       uint64_t rx_poll_count;
> +       /**< Receive queue poll count */
> +       uint64_t rx_packets;
> +       /**< Received packet count */
> +       uint64_t rx_dropped;
> +       /**< Received packet dropped count */
> +};
> +
>  /**
>   * A structure used to retrieve statistics for an eth rx adapter instance.
>   */
> @@ -617,6 +636,53 @@ int rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
>                         uint16_t rx_queue_id,
>                         struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
>
> +/**
> + * Retrieve Rx queue statistics.
> + *
> + * @param id
> + *  Adapter identifier.
> + *
> + * @param eth_dev_id
> + *  Port identifier of Ethernet device.
> + *
> + * @param rx_queue_id
> + *  Ethernet device receive queue index.
> + *
> + * @param[out] stats
> + *  Pointer to struct rte_event_eth_rx_adapter_queue_stats
> + *
> + * @return
> + *  - 0: Success, queue buffer stats retrieved.
> + *  - <0: Error code on failure.
> + */
> +__rte_experimental
> +int
> +rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
> +               uint16_t eth_dev_id,
> +               uint16_t rx_queue_id,
> +               struct rte_event_eth_rx_adapter_queue_stats *stats);
> +
> +/**
> + * Reset Rx queue statistics.
> + *
> + * @param id
> + *  Adapter identifier.
> + *
> + * @param eth_dev_id
> + *  Port identifier of Ethernet device.
> + *
> + * @param rx_queue_id
> + *  Ethernet device receive queue index.
> + *
> + * @return
> + *  - 0: Success, queue buffer stats retrieved.
> + *  - <0: Error code on failure.
> + */
> +__rte_experimental
> +int
> +rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
> +               uint16_t eth_dev_id,
> +               uint16_t rx_queue_id);
>
>  #ifdef __cplusplus
>  }
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> index cd37164141..ade1f1182e 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -103,6 +103,8 @@ EXPERIMENTAL {
>         # added in 21.11
>         rte_event_eth_rx_adapter_create_with_params;
>         rte_event_eth_rx_adapter_queue_conf_get;
> +       rte_event_eth_rx_adapter_queue_stats_get;
> +       rte_event_eth_rx_adapter_queue_stats_reset;
>  };
>
>  INTERNAL {
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2021-11-02 11:01 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
2021-10-28  4:54 [dpdk-dev] [PATCH 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
2021-10-28  4:54 ` [dpdk-dev] [PATCH 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
2021-10-28  4:54 ` [dpdk-dev] [PATCH 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
2021-10-28  6:55 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
2021-10-28  6:55   ` [dpdk-dev] [PATCH v2 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
2021-10-28  6:55   ` [dpdk-dev] [PATCH v2 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
2021-10-28  7:06 ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Naga Harish K S V
2021-10-28  7:06   ` [dpdk-dev] [PATCH v2 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
2021-10-28  7:06   ` [dpdk-dev] [PATCH v2 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
2021-10-28  8:10   ` [dpdk-dev] [PATCH v2 1/3] eventdev/eth_rx: add queue stats get and reset APIs Jayatheerthan, Jay
2021-10-28 10:27   ` [dpdk-dev] [PATCH v3 " Naga Harish K S V
2021-10-28 10:27     ` [dpdk-dev] [PATCH v3 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
2021-10-28 10:27     ` [dpdk-dev] [PATCH v3 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
2021-11-02 11:00     ` [dpdk-dev] [PATCH v3 1/3] eventdev/eth_rx: add queue stats get and reset APIs Jerin Jacob
2021-10-28 10:37   ` Naga Harish K S V
2021-10-28 10:37     ` [dpdk-dev] [PATCH v3 2/3] eventdev/eth_rx: support telemetry Naga Harish K S V
2021-10-28 10:37     ` [dpdk-dev] [PATCH v3 3/3] test/event: add unit test for Rx adapter Naga Harish K S V
