DPDK patches and discussions
* [dpdk-dev] [PATCH 1/2] event/dpaa: remove duplicate log macros
@ 2018-08-30  5:33 Hemant Agrawal
  2018-08-30  5:33 ` [dpdk-dev] [PATCH 2/2] event/dpaa: add select based event support Hemant Agrawal
  2018-09-25  7:02 ` [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros Hemant Agrawal
  0 siblings, 2 replies; 7+ messages in thread
From: Hemant Agrawal @ 2018-08-30  5:33 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, nipun.gupta

Align and clean up the debug log prints

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa/dpaa_eventdev.c | 58 +++++++++++++++++++-------------------
 drivers/event/dpaa/dpaa_eventdev.h |  7 -----
 2 files changed, 29 insertions(+), 36 deletions(-)

diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 5443ef5..9ddaf30 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -49,7 +49,7 @@ dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 {
 	uint64_t cycles_per_second;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
@@ -175,7 +175,7 @@ static void
 dpaa_event_dev_info_get(struct rte_eventdev *dev,
 			struct rte_event_dev_info *dev_info)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	dev_info->driver_name = "event_dpaa";
@@ -220,8 +220,7 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	int ret, i;
 	uint32_t *ch_id;
 
-	EVENTDEV_DRV_FUNC_TRACE();
-
+	EVENTDEV_INIT_FUNC_TRACE();
 	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
 	priv->nb_events_limit = conf->nb_events_limit;
 	priv->nb_event_queues = conf->nb_event_queues;
@@ -244,13 +243,14 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 			  sizeof(uint32_t) * priv->nb_event_queues,
 			  RTE_CACHE_LINE_SIZE);
 	if (ch_id == NULL) {
-		EVENTDEV_DRV_ERR("Fail to allocate memory for dpaa channels\n");
+		DPAA_EVENTDEV_ERR("Fail to allocate memory for dpaa channels\n");
 		return -ENOMEM;
 	}
 	/* Create requested event queues within the given event device */
 	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
 	if (ret < 0) {
-		EVENTDEV_DRV_ERR("Failed to create internal channel\n");
+		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d\n",
+				 priv->nb_event_queues, ret);
 		rte_free(ch_id);
 		return ret;
 	}
@@ -283,7 +283,7 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	 * can be created equals to number of lcore.
 	 */
 	rte_free(ch_id);
-	EVENTDEV_DRV_LOG("Configured eventdev devid=%d", dev->data->dev_id);
+	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);
 
 	return 0;
 }
@@ -291,7 +291,7 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 static int
 dpaa_event_dev_start(struct rte_eventdev *dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 	RTE_SET_USED(dev);
 
 	return 0;
@@ -300,14 +300,14 @@ dpaa_event_dev_start(struct rte_eventdev *dev)
 static void
 dpaa_event_dev_stop(struct rte_eventdev *dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 	RTE_SET_USED(dev);
 }
 
 static int
 dpaa_event_dev_close(struct rte_eventdev *dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 	RTE_SET_USED(dev);
 
 	return 0;
@@ -317,7 +317,7 @@ static void
 dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 			  struct rte_event_queue_conf *queue_conf)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(queue_id);
@@ -334,14 +334,14 @@ dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	struct dpaa_eventdev *priv = dev->data->dev_private;
 	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	switch (queue_conf->schedule_type) {
 	case RTE_SCHED_TYPE_PARALLEL:
 	case RTE_SCHED_TYPE_ATOMIC:
 		break;
 	case RTE_SCHED_TYPE_ORDERED:
-		EVENTDEV_DRV_ERR("Schedule type is not supported.");
+		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
 		return -1;
 	}
 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
@@ -353,7 +353,7 @@ dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 static void
 dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(queue_id);
@@ -363,7 +363,7 @@ static void
 dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
 				 struct rte_event_port_conf *port_conf)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(port_id);
@@ -379,7 +379,7 @@ dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 {
 	struct dpaa_eventdev *eventdev = dev->data->dev_private;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(port_conf);
 	dev->data->ports[port_id] = &eventdev->ports[port_id];
@@ -390,7 +390,7 @@ dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 static void
 dpaa_event_port_release(void *port)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(port);
 }
@@ -466,7 +466,7 @@ dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 {
 	const char *ethdev_driver = eth_dev->device->driver->name;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
@@ -491,14 +491,14 @@ dpaa_event_eth_rx_adapter_queue_add(
 	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
 	int ret, i;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	if (rx_queue_id == -1) {
 		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
 			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
 						     queue_conf);
 			if (ret) {
-				EVENTDEV_DRV_ERR(
+				DPAA_EVENTDEV_ERR(
 					"Event Queue attach failed:%d\n", ret);
 				goto detach_configured_queues;
 			}
@@ -508,7 +508,7 @@ dpaa_event_eth_rx_adapter_queue_add(
 
 	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
 	if (ret)
-		EVENTDEV_DRV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
+		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
 	return ret;
 
 detach_configured_queues:
@@ -527,14 +527,14 @@ dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
 	int ret, i;
 	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	if (rx_queue_id == -1) {
 		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
 			ret = dpaa_eth_eventq_detach(eth_dev, i);
 			if (ret)
-				EVENTDEV_DRV_ERR(
+				DPAA_EVENTDEV_ERR(
 					"Event Queue detach failed:%d\n", ret);
 		}
 
@@ -543,7 +543,7 @@ dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
 
 	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
 	if (ret)
-		EVENTDEV_DRV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
+		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
 	return ret;
 }
 
@@ -551,7 +551,7 @@ static int
 dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
 				const struct rte_eth_dev *eth_dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(eth_dev);
@@ -563,7 +563,7 @@ static int
 dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
 			       const struct rte_eth_dev *eth_dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(eth_dev);
@@ -603,7 +603,7 @@ dpaa_event_dev_create(const char *name)
 					   sizeof(struct dpaa_eventdev),
 					   rte_socket_id());
 	if (eventdev == NULL) {
-		EVENTDEV_DRV_ERR("Failed to create eventdev vdev %s", name);
+		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
 		goto fail;
 	}
 
@@ -631,7 +631,7 @@ dpaa_event_dev_probe(struct rte_vdev_device *vdev)
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
-	EVENTDEV_DRV_LOG("Initializing %s", name);
+	DPAA_EVENTDEV_INFO("Initializing %s", name);
 
 	return dpaa_event_dev_create(name);
 }
@@ -642,7 +642,7 @@ dpaa_event_dev_remove(struct rte_vdev_device *vdev)
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
-	EVENTDEV_DRV_LOG("Closing %s", name);
+	DPAA_EVENTDEV_INFO("Closing %s", name);
 
 	return rte_event_pmd_vdev_uninit(name);
 }
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
index 583e46c..3994bd6 100644
--- a/drivers/event/dpaa/dpaa_eventdev.h
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -12,13 +12,6 @@
 
 #define EVENTDEV_NAME_DPAA_PMD		event_dpaa1
 
-#define EVENTDEV_DRV_LOG(fmt, args...)	\
-		DPAA_EVENTDEV_INFO(fmt, ## args)
-#define EVENTDEV_DRV_FUNC_TRACE()	\
-		DPAA_EVENTDEV_DEBUG("%s() Called:\n", __func__)
-#define EVENTDEV_DRV_ERR(fmt, args...)	\
-		DPAA_EVENTDEV_ERR("%s(): " fmt "\n", __func__, ## args)
-
 #define DPAA_EVENT_MAX_PORTS			8
 #define DPAA_EVENT_MAX_QUEUES			16
 #define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT	1
-- 
2.7.4


* [dpdk-dev] [PATCH 2/2] event/dpaa: add select based event support
  2018-08-30  5:33 [dpdk-dev] [PATCH 1/2] event/dpaa: remove duplicate log macros Hemant Agrawal
@ 2018-08-30  5:33 ` Hemant Agrawal
  2018-09-10 13:33   ` Jerin Jacob
  2018-09-25  7:02 ` [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros Hemant Agrawal
  1 sibling, 1 reply; 7+ messages in thread
From: Hemant Agrawal @ 2018-08-30  5:33 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, nipun.gupta

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 config/common_base                       |   1 +
 config/defconfig_arm64-dpaa-linuxapp-gcc |   1 +
 drivers/event/dpaa/dpaa_eventdev.c       | 148 +++++++++++++++++++++++--------
 drivers/event/dpaa/dpaa_eventdev.h       |   8 +-
 4 files changed, 115 insertions(+), 43 deletions(-)

diff --git a/config/common_base b/config/common_base
index 4bcbaf9..01a6f17 100644
--- a/config/common_base
+++ b/config/common_base
@@ -199,6 +199,7 @@ CONFIG_RTE_LIBRTE_DPAA_BUS=n
 CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
 CONFIG_RTE_LIBRTE_DPAA_PMD=n
 CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
+CONFIG_RTE_LIBRTE_DPAA_EVENT_INTR_MODE=n
 
 #
 # Compile NXP DPAA2 FSL-MC Bus
diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc b/config/defconfig_arm64-dpaa-linuxapp-gcc
index c47aec0..cdaaa4c 100644
--- a/config/defconfig_arm64-dpaa-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa-linuxapp-gcc
@@ -21,3 +21,4 @@ CONFIG_RTE_PKTMBUF_HEADROOM=128
 # NXP DPAA Bus
 CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
+CONFIG_RTE_LIBRTE_DPAA_EVENT_INTR_MODE=y
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 9ddaf30..b82a8a9 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -47,14 +47,18 @@ static int
 dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 				 uint64_t *timeout_ticks)
 {
-	uint64_t cycles_per_second;
-
 	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
+#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+	*timeout_ticks = ns/1000;
+#else
+	uint64_t cycles_per_second;
+
 	cycles_per_second = rte_get_timer_hz();
-	*timeout_ticks = ns * (cycles_per_second / NS_PER_S);
+	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
+#endif
 
 	return 0;
 }
@@ -100,6 +104,58 @@ dpaa_event_enqueue(void *port, const struct rte_event *ev)
 	return dpaa_event_enqueue_burst(port, ev, 1);
 }
 
+#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+static void drain_4_bytes(int fd, fd_set *fdset)
+{
+	if (FD_ISSET(fd, fdset)) {
+		/* drain 4 bytes */
+		uint32_t junk;
+		ssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));
+		if (sjunk != sizeof(junk))
+			DPAA_EVENTDEV_ERR("UIO irq read error");
+	}
+}
+
+static inline int
+dpaa_event_dequeue_wait(uint64_t timeout_ticks)
+{
+	int fd_qman, nfds;
+	int ret;
+	fd_set readset;
+
+	/* Go into (and back out of) IRQ mode for each select,
+	 * it simplifies exit-path considerations and other
+	 * potential nastiness.
+	 */
+	struct timeval tv = {
+		.tv_sec = timeout_ticks / 1000000,
+		.tv_usec = timeout_ticks % 1000000
+	};
+
+	fd_qman = qman_thread_fd();
+	nfds = fd_qman + 1;
+	FD_ZERO(&readset);
+	FD_SET(fd_qman, &readset);
+
+	qman_irqsource_add(QM_PIRQ_DQRI);
+
+	ret = select(nfds, &readset, NULL, NULL, &tv);
+	if (ret < 0)
+		return ret;
+	/* Calling irqsource_remove() prior to thread_irq()
+	 * means thread_irq() will not process whatever caused
+	 * the interrupts, however it does ensure that, once
+	 * thread_irq() re-enables interrupts, they won't fire
+	 * again immediately.
+	 */
+	qman_irqsource_remove(~0);
+	drain_4_bytes(fd_qman, &readset);
+	qman_thread_irq();
+
+	return ret;
+}
+#endif
+
 static uint16_t
 dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 			 uint16_t nb_events, uint64_t timeout_ticks)
@@ -107,8 +163,8 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 	int ret;
 	u16 ch_id;
 	void *buffers[8];
-	u32 num_frames, i;
-	uint64_t wait_time, cur_ticks, start_ticks;
+	u32 num_frames, i, irq = 0;
+	uint64_t cur_ticks = 0, wait_time_ticks = 0;
 	struct dpaa_port *portal = (struct dpaa_port *)port;
 	struct rte_mbuf *mbuf;
 
@@ -147,20 +203,32 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 	}
 	DPAA_PER_LCORE_DQRR_HELD = 0;
 
-	if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
-		wait_time = timeout_ticks;
+	if (timeout_ticks)
+		wait_time_ticks = timeout_ticks;
 	else
-		wait_time = portal->timeout;
+		wait_time_ticks = portal->timeout_us;
 
-	/* Lets dequeue the frames */
-	start_ticks = rte_get_timer_cycles();
-	wait_time += start_ticks;
+#ifndef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+	wait_time_ticks += rte_get_timer_cycles();
+#endif
 	do {
+		/* Lets dequeue the frames */
 		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
-		if (num_frames != 0)
+		if (irq)
+			irq = 0;
+		if (num_frames)
 			break;
+#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+		if (wait_time_ticks) { /* wait for time */
+			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
+				irq = 1;
+				continue;
+			}
+			break; /* no event after waiting */
+		}
+#endif
 		cur_ticks = rte_get_timer_cycles();
-	} while (cur_ticks < wait_time);
+	} while (cur_ticks < wait_time_ticks);
 
 	return num_frames;
 }
@@ -184,7 +252,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
 	dev_info->max_dequeue_timeout_ns =
 		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
 	dev_info->dequeue_timeout_ns =
-		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
 	dev_info->max_event_queues =
 		DPAA_EVENT_MAX_QUEUES;
 	dev_info->max_event_queue_flows =
@@ -230,15 +298,6 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
 	priv->event_dev_cfg = conf->event_dev_cfg;
 
-	/* Check dequeue timeout method is per dequeue or global */
-	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
-		/*
-		 * Use timeout value as given in dequeue operation.
-		 * So invalidating this timetout value.
-		 */
-		priv->dequeue_timeout_ns = 0;
-	}
-
 	ch_id = rte_malloc("dpaa-channels",
 			  sizeof(uint32_t) * priv->nb_event_queues,
 			  RTE_CACHE_LINE_SIZE);
@@ -260,24 +319,34 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	/* Lets prepare event ports */
 	memset(&priv->ports[0], 0,
 	      sizeof(struct dpaa_port) * priv->nb_event_ports);
+
+	/* Check dequeue timeout method is per dequeue or global */
 	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
-		for (i = 0; i < priv->nb_event_ports; i++) {
-			priv->ports[i].timeout =
-				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
-		}
-	} else if (priv->dequeue_timeout_ns == 0) {
-		for (i = 0; i < priv->nb_event_ports; i++) {
-			dpaa_event_dequeue_timeout_ticks(NULL,
-				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
-				&priv->ports[i].timeout);
-		}
+		/*
+		 * Use timeout value as given in dequeue operation.
+		 * So invalidating this timeout value.
+		 */
+		priv->dequeue_timeout_ns = 0;
+
+	} else if (conf->dequeue_timeout_ns == 0) {
+		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
 	} else {
-		for (i = 0; i < priv->nb_event_ports; i++) {
-			dpaa_event_dequeue_timeout_ticks(NULL,
-				priv->dequeue_timeout_ns,
-				&priv->ports[i].timeout);
-		}
+		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
 	}
+
+	for (i = 0; i < priv->nb_event_ports; i++) {
+#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+		priv->ports[i].timeout_us = priv->dequeue_timeout_ns/1000;
+#else
+		uint64_t cycles_per_second;
+
+		cycles_per_second = rte_get_timer_hz();
+		priv->ports[i].timeout_us =
+			(priv->dequeue_timeout_ns * cycles_per_second)
+				/ NS_PER_S;
+#endif
+	}
+
 	/*
 	 * TODO: Currently portals are affined with threads. Maximum threads
 	 * can be created equals to number of lcore.
@@ -454,7 +523,8 @@ dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
 		event_queue->event_port = NULL;
 	}
 
-	event_port->num_linked_evq = event_port->num_linked_evq - i;
+	if (event_port->num_linked_evq)
+		event_port->num_linked_evq = event_port->num_linked_evq - i;
 
 	return (int)i;
 }
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
index 3994bd6..2021339 100644
--- a/drivers/event/dpaa/dpaa_eventdev.h
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -12,8 +12,8 @@
 
 #define EVENTDEV_NAME_DPAA_PMD		event_dpaa1
 
-#define DPAA_EVENT_MAX_PORTS			8
-#define DPAA_EVENT_MAX_QUEUES			16
+#define DPAA_EVENT_MAX_PORTS			4
+#define DPAA_EVENT_MAX_QUEUES			8
 #define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT	1
 #define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT	(UINT32_MAX - 1)
 #define DPAA_EVENT_MAX_QUEUE_FLOWS		2048
@@ -21,7 +21,7 @@
 #define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS	0
 #define DPAA_EVENT_MAX_EVENT_PORT		RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
 #define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH	8
-#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100UL
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100000UL
 #define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID	((uint64_t)-1)
 #define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH	1
 #define DPAA_EVENT_MAX_NUM_EVENTS		(INT32_MAX - 1)
@@ -54,7 +54,7 @@ struct dpaa_port {
 	struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
 	uint8_t num_linked_evq;
 	uint8_t is_port_linked;
-	uint64_t timeout;
+	uint64_t timeout_us;
 };
 
 struct dpaa_eventdev {
-- 
2.7.4


* Re: [dpdk-dev] [PATCH 2/2] event/dpaa: add select based event support
  2018-08-30  5:33 ` [dpdk-dev] [PATCH 2/2] event/dpaa: add select based event support Hemant Agrawal
@ 2018-09-10 13:33   ` Jerin Jacob
  0 siblings, 0 replies; 7+ messages in thread
From: Jerin Jacob @ 2018-09-10 13:33 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev, nipun.gupta

-----Original Message-----
> Date: Thu, 30 Aug 2018 11:03:16 +0530
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> To: dev@dpdk.org
> CC: jerin.jacob@caviumnetworks.com, nipun.gupta@nxp.com
> Subject: [PATCH 2/2] event/dpaa: add select based event support
> X-Mailer: git-send-email 2.7.4
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  config/common_base                       |   1 +
>  config/defconfig_arm64-dpaa-linuxapp-gcc |   1 +
>  drivers/event/dpaa/dpaa_eventdev.c       | 148 +++++++++++++++++++++++--------
>  drivers/event/dpaa/dpaa_eventdev.h       |   8 +-
>  4 files changed, 115 insertions(+), 43 deletions(-)
> 
> diff --git a/config/common_base b/config/common_base
> index 4bcbaf9..01a6f17 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -199,6 +199,7 @@ CONFIG_RTE_LIBRTE_DPAA_BUS=n
>  CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
>  CONFIG_RTE_LIBRTE_DPAA_PMD=n
>  CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
> +CONFIG_RTE_LIBRTE_DPAA_EVENT_INTR_MODE=n
> +#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE


Please don't add new compile-time options. You can use
devargs to select this mode and install different function
pointers to choose the mode at runtime.
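
For reference, a minimal sketch of such a devargs check (assuming the
standard rte_kvargs API; the key name "disable_intr" is illustrative)
might look like:

    #include <rte_kvargs.h>

    #define DISABLE_INTR_MODE "disable_intr"

    /* Return 1 if the vdev args contain the disable_intr key, else 0.
     * Based on this, the probe function can install either the polling
     * or the interrupt-based dequeue function pointers at runtime.
     */
    static int
    parse_disable_intr(const char *params)
    {
        struct rte_kvargs *kvlist;
        int disable = 0;

        if (params == NULL || params[0] == '\0')
            return 0;

        kvlist = rte_kvargs_parse(params, NULL);
        if (kvlist == NULL)
            return 0;

        if (rte_kvargs_count(kvlist, DISABLE_INTR_MODE))
            disable = 1; /* value validation elided for brevity */

        rte_kvargs_free(kvlist);
        return disable;
    }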


> +static void drain_4_bytes(int fd, fd_set *fdset)
> +{
> +       if (FD_ISSET(fd, fdset)) {
> +               /* drain 4 bytes */
> +               uint32_t junk;
> +               ssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));
> +               if (sjunk != sizeof(junk))
> +                       DPAA_EVENTDEV_ERR("UIO irq read error");
> +       }
> +}
> +
> +static inline int
> +dpaa_event_dequeue_wait(uint64_t timeout_ticks)
> +{
> +       int fd_qman, nfds;
> +       int ret;
> +       fd_set readset;
> +
> +       /* Go into (and back out of) IRQ mode for each select,
> +        * it simplifies exit-path considerations and other
> +        * potential nastiness.
> +        */
> +       struct timeval tv = {
> +               .tv_sec = timeout_ticks / 1000000,
> +               .tv_usec = timeout_ticks % 1000000
> +       };
> +
> +       fd_qman = qman_thread_fd();
> +       nfds = fd_qman + 1;
> +       FD_ZERO(&readset);
> +       FD_SET(fd_qman, &readset);
> +
> +       qman_irqsource_add(QM_PIRQ_DQRI);
> +
> +       ret = select(nfds, &readset, NULL, NULL, &tv);
> +       if (ret < 0)
> +               return ret;
> +       /* Calling irqsource_remove() prior to thread_irq()
> +        * means thread_irq() will not process whatever caused
> +        * the interrupts, however it does ensure that, once
> +        * thread_irq() re-enables interrupts, they won't fire
> +        * again immediately.
> +        */
> +       qman_irqsource_remove(~0);
> +       drain_4_bytes(fd_qman, &readset);
> +       qman_thread_irq();
> +
> +       return ret;
> +}
> +#endif
> +
>  static uint16_t
>  dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
>                          uint16_t nb_events, uint64_t timeout_ticks)
> @@ -107,8 +163,8 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
>         int ret;
>         u16 ch_id;
>         void *buffers[8];
> -       u32 num_frames, i;
> -       uint64_t wait_time, cur_ticks, start_ticks;
> +       u32 num_frames, i, irq = 0;
> +       uint64_t cur_ticks = 0, wait_time_ticks = 0;
>         struct dpaa_port *portal = (struct dpaa_port *)port;
>         struct rte_mbuf *mbuf;
> 
> @@ -147,20 +203,32 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
>         }
>         DPAA_PER_LCORE_DQRR_HELD = 0;
> 
> -       if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
> -               wait_time = timeout_ticks;
> +       if (timeout_ticks)
> +               wait_time_ticks = timeout_ticks;
>         else
> -               wait_time = portal->timeout;
> +               wait_time_ticks = portal->timeout_us;
> 
> -       /* Lets dequeue the frames */
> -       start_ticks = rte_get_timer_cycles();
> -       wait_time += start_ticks;
> +#ifndef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
> +       wait_time_ticks += rte_get_timer_cycles();
> +#endif
>         do {
> +               /* Lets dequeue the frames */
>                 num_frames = qman_portal_dequeue(ev, nb_events, buffers);
> -               if (num_frames != 0)
> +               if (irq)
> +                       irq = 0;
> +               if (num_frames)
>                         break;
> +#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
> +               if (wait_time_ticks) { /* wait for time */
> +                       if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
> +                               irq = 1;
> +                               continue;
> +                       }
> +                       break; /* no event after waiting */
> +               }
> +#endif
>                 cur_ticks = rte_get_timer_cycles();
> -       } while (cur_ticks < wait_time);
> +       } while (cur_ticks < wait_time_ticks);
> 
>         return num_frames;
>  }
> @@ -184,7 +252,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
>         dev_info->max_dequeue_timeout_ns =
>                 DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
>         dev_info->dequeue_timeout_ns =
> -               DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
> +               DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
>         dev_info->max_event_queues =
>                 DPAA_EVENT_MAX_QUEUES;
>         dev_info->max_event_queue_flows =
> @@ -230,15 +298,6 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
>         priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
>         priv->event_dev_cfg = conf->event_dev_cfg;
> 
> -       /* Check dequeue timeout method is per dequeue or global */
> -       if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
> -               /*
> -                * Use timeout value as given in dequeue operation.
> -                * So invalidating this timetout value.
> -                */
> -               priv->dequeue_timeout_ns = 0;
> -       }
> -
>         ch_id = rte_malloc("dpaa-channels",
>                           sizeof(uint32_t) * priv->nb_event_queues,
>                           RTE_CACHE_LINE_SIZE);
> @@ -260,24 +319,34 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
>         /* Lets prepare event ports */
>         memset(&priv->ports[0], 0,
>               sizeof(struct dpaa_port) * priv->nb_event_ports);
> +
> +       /* Check dequeue timeout method is per dequeue or global */
>         if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
> -               for (i = 0; i < priv->nb_event_ports; i++) {
> -                       priv->ports[i].timeout =
> -                               DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
> -               }
> -       } else if (priv->dequeue_timeout_ns == 0) {
> -               for (i = 0; i < priv->nb_event_ports; i++) {
> -                       dpaa_event_dequeue_timeout_ticks(NULL,
> -                               DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
> -                               &priv->ports[i].timeout);
> -               }
> +               /*
> +                * Use timeout value as given in dequeue operation.
> +                * So invalidating this timeout value.
> +                */
> +               priv->dequeue_timeout_ns = 0;
> +
> +       } else if (conf->dequeue_timeout_ns == 0) {
> +               priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
>         } else {
> -               for (i = 0; i < priv->nb_event_ports; i++) {
> -                       dpaa_event_dequeue_timeout_ticks(NULL,
> -                               priv->dequeue_timeout_ns,
> -                               &priv->ports[i].timeout);
> -               }
> +               priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
>         }
> +
> +       for (i = 0; i < priv->nb_event_ports; i++) {
> +#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
> +               priv->ports[i].timeout_us = priv->dequeue_timeout_ns/1000;
> +#else
> +               uint64_t cycles_per_second;
> +
> +               cycles_per_second = rte_get_timer_hz();
> +               priv->ports[i].timeout_us =
> +                       (priv->dequeue_timeout_ns * cycles_per_second)
> +                               / NS_PER_S;
> +#endif
> +       }
> +
>         /*
>          * TODO: Currently portals are affined with threads. Maximum threads
>          * can be created equals to number of lcore.
> @@ -454,7 +523,8 @@ dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
>                 event_queue->event_port = NULL;
>         }
> 
> -       event_port->num_linked_evq = event_port->num_linked_evq - i;
> +       if (event_port->num_linked_evq)
> +               event_port->num_linked_evq = event_port->num_linked_evq - i;
> 
>         return (int)i;
>  }
> diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
> index 3994bd6..2021339 100644
> --- a/drivers/event/dpaa/dpaa_eventdev.h
> +++ b/drivers/event/dpaa/dpaa_eventdev.h
> @@ -12,8 +12,8 @@
> 
>  #define EVENTDEV_NAME_DPAA_PMD         event_dpaa1
> 
> -#define DPAA_EVENT_MAX_PORTS                   8
> -#define DPAA_EVENT_MAX_QUEUES                  16
> +#define DPAA_EVENT_MAX_PORTS                   4
> +#define DPAA_EVENT_MAX_QUEUES                  8
>  #define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT 1
>  #define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
>  #define DPAA_EVENT_MAX_QUEUE_FLOWS             2048
> @@ -21,7 +21,7 @@
>  #define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS   0
>  #define DPAA_EVENT_MAX_EVENT_PORT              RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
>  #define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH      8
> -#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS     100UL
> +#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS     100000UL
>  #define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID        ((uint64_t)-1)
>  #define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH      1
>  #define DPAA_EVENT_MAX_NUM_EVENTS              (INT32_MAX - 1)
> @@ -54,7 +54,7 @@ struct dpaa_port {
>         struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
>         uint8_t num_linked_evq;
>         uint8_t is_port_linked;
> -       uint64_t timeout;
> +       uint64_t timeout_us;
>  };
> 
>  struct dpaa_eventdev {
> --
> 2.7.4
> 


* [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros
  2018-08-30  5:33 [dpdk-dev] [PATCH 1/2] event/dpaa: remove duplicate log macros Hemant Agrawal
  2018-08-30  5:33 ` [dpdk-dev] [PATCH 2/2] event/dpaa: add select based event support Hemant Agrawal
@ 2018-09-25  7:02 ` Hemant Agrawal
  2018-09-25  7:02   ` [dpdk-dev] [PATCH v2 2/2] event/dpaa: add select based event support Hemant Agrawal
  2018-10-04 14:40   ` [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros Jerin Jacob
  1 sibling, 2 replies; 7+ messages in thread
From: Hemant Agrawal @ 2018-09-25  7:02 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob

Align and clean up the debug log prints

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
Note: This patch has a dependency on the following patch series:
http://mails.dpdk.org/archives/dev/2018-September/112433.html
which is now part of the dpdk-next-net tree.

 drivers/event/dpaa/dpaa_eventdev.c | 58 +++++++++++++++++++-------------------
 drivers/event/dpaa/dpaa_eventdev.h |  7 -----
 2 files changed, 29 insertions(+), 36 deletions(-)

diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 5443ef5..9ddaf30 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -49,7 +49,7 @@ dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 {
 	uint64_t cycles_per_second;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
@@ -175,7 +175,7 @@ static void
 dpaa_event_dev_info_get(struct rte_eventdev *dev,
 			struct rte_event_dev_info *dev_info)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	dev_info->driver_name = "event_dpaa";
@@ -220,8 +220,7 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	int ret, i;
 	uint32_t *ch_id;
 
-	EVENTDEV_DRV_FUNC_TRACE();
-
+	EVENTDEV_INIT_FUNC_TRACE();
 	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
 	priv->nb_events_limit = conf->nb_events_limit;
 	priv->nb_event_queues = conf->nb_event_queues;
@@ -244,13 +243,14 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 			  sizeof(uint32_t) * priv->nb_event_queues,
 			  RTE_CACHE_LINE_SIZE);
 	if (ch_id == NULL) {
-		EVENTDEV_DRV_ERR("Fail to allocate memory for dpaa channels\n");
+		DPAA_EVENTDEV_ERR("Fail to allocate memory for dpaa channels\n");
 		return -ENOMEM;
 	}
 	/* Create requested event queues within the given event device */
 	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
 	if (ret < 0) {
-		EVENTDEV_DRV_ERR("Failed to create internal channel\n");
+		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d\n",
+				 priv->nb_event_queues, ret);
 		rte_free(ch_id);
 		return ret;
 	}
@@ -283,7 +283,7 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	 * can be created equals to number of lcore.
 	 */
 	rte_free(ch_id);
-	EVENTDEV_DRV_LOG("Configured eventdev devid=%d", dev->data->dev_id);
+	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);
 
 	return 0;
 }
@@ -291,7 +291,7 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 static int
 dpaa_event_dev_start(struct rte_eventdev *dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 	RTE_SET_USED(dev);
 
 	return 0;
@@ -300,14 +300,14 @@ dpaa_event_dev_start(struct rte_eventdev *dev)
 static void
 dpaa_event_dev_stop(struct rte_eventdev *dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 	RTE_SET_USED(dev);
 }
 
 static int
 dpaa_event_dev_close(struct rte_eventdev *dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 	RTE_SET_USED(dev);
 
 	return 0;
@@ -317,7 +317,7 @@ static void
 dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 			  struct rte_event_queue_conf *queue_conf)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(queue_id);
@@ -334,14 +334,14 @@ dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	struct dpaa_eventdev *priv = dev->data->dev_private;
 	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	switch (queue_conf->schedule_type) {
 	case RTE_SCHED_TYPE_PARALLEL:
 	case RTE_SCHED_TYPE_ATOMIC:
 		break;
 	case RTE_SCHED_TYPE_ORDERED:
-		EVENTDEV_DRV_ERR("Schedule type is not supported.");
+		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
 		return -1;
 	}
 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
@@ -353,7 +353,7 @@ dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 static void
 dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(queue_id);
@@ -363,7 +363,7 @@ static void
 dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
 				 struct rte_event_port_conf *port_conf)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(port_id);
@@ -379,7 +379,7 @@ dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 {
 	struct dpaa_eventdev *eventdev = dev->data->dev_private;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(port_conf);
 	dev->data->ports[port_id] = &eventdev->ports[port_id];
@@ -390,7 +390,7 @@ dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 static void
 dpaa_event_port_release(void *port)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(port);
 }
@@ -466,7 +466,7 @@ dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 {
 	const char *ethdev_driver = eth_dev->device->driver->name;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
@@ -491,14 +491,14 @@ dpaa_event_eth_rx_adapter_queue_add(
 	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
 	int ret, i;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	if (rx_queue_id == -1) {
 		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
 			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
 						     queue_conf);
 			if (ret) {
-				EVENTDEV_DRV_ERR(
+				DPAA_EVENTDEV_ERR(
 					"Event Queue attach failed:%d\n", ret);
 				goto detach_configured_queues;
 			}
@@ -508,7 +508,7 @@ dpaa_event_eth_rx_adapter_queue_add(
 
 	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
 	if (ret)
-		EVENTDEV_DRV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
+		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
 	return ret;
 
 detach_configured_queues:
@@ -527,14 +527,14 @@ dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
 	int ret, i;
 	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
 
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	if (rx_queue_id == -1) {
 		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
 			ret = dpaa_eth_eventq_detach(eth_dev, i);
 			if (ret)
-				EVENTDEV_DRV_ERR(
+				DPAA_EVENTDEV_ERR(
 					"Event Queue detach failed:%d\n", ret);
 		}
 
@@ -543,7 +543,7 @@ dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
 
 	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
 	if (ret)
-		EVENTDEV_DRV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
+		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
 	return ret;
 }
 
@@ -551,7 +551,7 @@ static int
 dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
 				const struct rte_eth_dev *eth_dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(eth_dev);
@@ -563,7 +563,7 @@ static int
 dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
 			       const struct rte_eth_dev *eth_dev)
 {
-	EVENTDEV_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(eth_dev);
@@ -603,7 +603,7 @@ dpaa_event_dev_create(const char *name)
 					   sizeof(struct dpaa_eventdev),
 					   rte_socket_id());
 	if (eventdev == NULL) {
-		EVENTDEV_DRV_ERR("Failed to create eventdev vdev %s", name);
+		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
 		goto fail;
 	}
 
@@ -631,7 +631,7 @@ dpaa_event_dev_probe(struct rte_vdev_device *vdev)
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
-	EVENTDEV_DRV_LOG("Initializing %s", name);
+	DPAA_EVENTDEV_INFO("Initializing %s", name);
 
 	return dpaa_event_dev_create(name);
 }
@@ -642,7 +642,7 @@ dpaa_event_dev_remove(struct rte_vdev_device *vdev)
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
-	EVENTDEV_DRV_LOG("Closing %s", name);
+	DPAA_EVENTDEV_INFO("Closing %s", name);
 
 	return rte_event_pmd_vdev_uninit(name);
 }
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
index 583e46c..3994bd6 100644
--- a/drivers/event/dpaa/dpaa_eventdev.h
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -12,13 +12,6 @@
 
 #define EVENTDEV_NAME_DPAA_PMD		event_dpaa1
 
-#define EVENTDEV_DRV_LOG(fmt, args...)	\
-		DPAA_EVENTDEV_INFO(fmt, ## args)
-#define EVENTDEV_DRV_FUNC_TRACE()	\
-		DPAA_EVENTDEV_DEBUG("%s() Called:\n", __func__)
-#define EVENTDEV_DRV_ERR(fmt, args...)	\
-		DPAA_EVENTDEV_ERR("%s(): " fmt "\n", __func__, ## args)
-
 #define DPAA_EVENT_MAX_PORTS			8
 #define DPAA_EVENT_MAX_QUEUES			16
 #define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT	1
-- 
2.7.4


* [dpdk-dev] [PATCH v2 2/2] event/dpaa: add select based event support
  2018-09-25  7:02 ` [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros Hemant Agrawal
@ 2018-09-25  7:02   ` Hemant Agrawal
  2018-09-28 11:43     ` Jerin Jacob
  2018-10-04 14:40   ` [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros Jerin Jacob
  1 sibling, 1 reply; 7+ messages in thread
From: Hemant Agrawal @ 2018-09-25  7:02 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob

This patch adds support for using the select call on the qman
portal fd for timeout-based dequeue requests in eventdev.

If an event is available, the qman portal fd will be set and the
function will wake up. If no event is available, it will wait
only until the given timeout expires.

In interrupt mode, the timeout ticks are interpreted as microseconds.
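
To illustrate the mechanism, a minimal standalone sketch of such a
select-based wait (using a generic file descriptor rather than the
actual qman portal API, and with the timeout already converted to
microseconds) could look like this:

    #include <stdint.h>
    #include <sys/select.h>
    #include <sys/time.h>

    /* Wait up to timeout_us microseconds for fd to become readable.
     * Illustrative helper only: the driver additionally arms the
     * portal IRQ before the select() and disarms/drains it after.
     * Returns >0 if the fd is ready, 0 on timeout, <0 on error.
     */
    static int
    wait_for_event(int fd, uint64_t timeout_us)
    {
        fd_set readset;
        struct timeval tv = {
            .tv_sec  = timeout_us / 1000000,
            .tv_usec = timeout_us % 1000000,
        };

        FD_ZERO(&readset);
        FD_SET(fd, &readset);

        return select(fd + 1, &readset, NULL, NULL, &tv);
    }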

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
Note: This patch has a dependency on the following patch series:
http://mails.dpdk.org/archives/dev/2018-September/112433.html
which is now part of the dpdk-next-net tree.

 doc/guides/eventdevs/dpaa.rst      |   2 +
 drivers/event/dpaa/dpaa_eventdev.c | 283 +++++++++++++++++++++++++++++++------
 drivers/event/dpaa/dpaa_eventdev.h |  10 +-
 3 files changed, 247 insertions(+), 48 deletions(-)

diff --git a/doc/guides/eventdevs/dpaa.rst b/doc/guides/eventdevs/dpaa.rst
index 7383295..2f356d3 100644
--- a/doc/guides/eventdevs/dpaa.rst
+++ b/doc/guides/eventdevs/dpaa.rst
@@ -122,6 +122,8 @@ Example:
 
     ./your_eventdev_application --vdev="event_dpaa1"
 
+* Use dev arg option ``disable_intr=1`` to disable the interrupt mode
+
 Limitations
 -----------
 
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 9ddaf30..1e247e4 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -30,6 +30,7 @@
 #include <rte_dpaa_bus.h>
 #include <rte_dpaa_logs.h>
 #include <rte_cycles.h>
+#include <rte_kvargs.h>
 
 #include <dpaa_ethdev.h>
 #include "dpaa_eventdev.h"
@@ -43,22 +44,34 @@
  * 1 Eventdev can have N Eventqueue
  */
 
+#define DISABLE_INTR_MODE "disable_intr"
+
 static int
 dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 				 uint64_t *timeout_ticks)
 {
-	uint64_t cycles_per_second;
-
 	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
+	uint64_t cycles_per_second;
+
 	cycles_per_second = rte_get_timer_hz();
-	*timeout_ticks = ns * (cycles_per_second / NS_PER_S);
+	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
 
 	return 0;
 }
 
+static int
+dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
+				 uint64_t *timeout_ticks)
+{
+	RTE_SET_USED(dev);
+
+	*timeout_ticks = ns/1000;
+	return 0;
+}
+
 static void
 dpaa_eventq_portal_add(u16 ch_id)
 {
@@ -100,6 +113,56 @@ dpaa_event_enqueue(void *port, const struct rte_event *ev)
 	return dpaa_event_enqueue_burst(port, ev, 1);
 }
 
+static void drain_4_bytes(int fd, fd_set *fdset)
+{
+	if (FD_ISSET(fd, fdset)) {
+		/* drain 4 bytes */
+		uint32_t junk;
+		ssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));
+		if (sjunk != sizeof(junk))
+			DPAA_EVENTDEV_ERR("UIO irq read error");
+	}
+}
+
+static inline int
+dpaa_event_dequeue_wait(uint64_t timeout_ticks)
+{
+	int fd_qman, nfds;
+	int ret;
+	fd_set readset;
+
+	/* Go into (and back out of) IRQ mode for each select,
+	 * it simplifies exit-path considerations and other
+	 * potential nastiness.
+	 */
+	struct timeval tv = {
+		.tv_sec = timeout_ticks / 1000000,
+		.tv_usec = timeout_ticks % 1000000
+	};
+
+	fd_qman = qman_thread_fd();
+	nfds = fd_qman + 1;
+	FD_ZERO(&readset);
+	FD_SET(fd_qman, &readset);
+
+	qman_irqsource_add(QM_PIRQ_DQRI);
+
+	ret = select(nfds, &readset, NULL, NULL, &tv);
+	if (ret < 0)
+		return ret;
+	/* Calling irqsource_remove() prior to thread_irq()
+	 * means thread_irq() will not process whatever caused
+	 * the interrupts, however it does ensure that, once
+	 * thread_irq() re-enables interrupts, they won't fire
+	 * again immediately.
+	 */
+	qman_irqsource_remove(~0);
+	drain_4_bytes(fd_qman, &readset);
+	qman_thread_irq();
+
+	return ret;
+}
+
 static uint16_t
 dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 			 uint16_t nb_events, uint64_t timeout_ticks)
@@ -107,8 +170,8 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 	int ret;
 	u16 ch_id;
 	void *buffers[8];
-	u32 num_frames, i;
-	uint64_t wait_time, cur_ticks, start_ticks;
+	u32 num_frames, i, irq = 0;
+	uint64_t cur_ticks = 0, wait_time_ticks = 0;
 	struct dpaa_port *portal = (struct dpaa_port *)port;
 	struct rte_mbuf *mbuf;
 
@@ -147,20 +210,21 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 	}
 	DPAA_PER_LCORE_DQRR_HELD = 0;
 
-	if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
-		wait_time = timeout_ticks;
+	if (timeout_ticks)
+		wait_time_ticks = timeout_ticks;
 	else
-		wait_time = portal->timeout;
+		wait_time_ticks = portal->timeout_us;
 
-	/* Lets dequeue the frames */
-	start_ticks = rte_get_timer_cycles();
-	wait_time += start_ticks;
+	wait_time_ticks += rte_get_timer_cycles();
 	do {
+		/* Lets dequeue the frames */
 		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
-		if (num_frames != 0)
+		if (irq)
+			irq = 0;
+		if (num_frames)
 			break;
 		cur_ticks = rte_get_timer_cycles();
-	} while (cur_ticks < wait_time);
+	} while (cur_ticks < wait_time_ticks);
 
 	return num_frames;
 }
@@ -171,6 +235,86 @@ dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
 	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
 }
 
+static uint16_t
+dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
+			      uint16_t nb_events, uint64_t timeout_ticks)
+{
+	int ret;
+	u16 ch_id;
+	void *buffers[8];
+	u32 num_frames, i, irq = 0;
+	uint64_t cur_ticks = 0, wait_time_ticks = 0;
+	struct dpaa_port *portal = (struct dpaa_port *)port;
+	struct rte_mbuf *mbuf;
+
+	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+		/* Affine current thread context to a qman portal */
+		ret = rte_dpaa_portal_init((void *)0);
+		if (ret) {
+			DPAA_EVENTDEV_ERR("Unable to initialize portal");
+			return ret;
+		}
+	}
+
+	if (unlikely(!portal->is_port_linked)) {
+		/*
+		 * Affine event queue for current thread context
+		 * to a qman portal.
+		 */
+		for (i = 0; i < portal->num_linked_evq; i++) {
+			ch_id = portal->evq_info[i].ch_id;
+			dpaa_eventq_portal_add(ch_id);
+		}
+		portal->is_port_linked = true;
+	}
+
+	/* Check if there are atomic contexts to be released */
+	i = 0;
+	while (DPAA_PER_LCORE_DQRR_SIZE) {
+		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
+			qman_dca_index(i, 0);
+			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+			DPAA_PER_LCORE_DQRR_SIZE--;
+		}
+		i++;
+	}
+	DPAA_PER_LCORE_DQRR_HELD = 0;
+
+	if (timeout_ticks)
+		wait_time_ticks = timeout_ticks;
+	else
+		wait_time_ticks = portal->timeout_us;
+
+	do {
+		/* Lets dequeue the frames */
+		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
+		if (irq)
+			irq = 0;
+		if (num_frames)
+			break;
+		if (wait_time_ticks) { /* wait for time */
+			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
+				irq = 1;
+				continue;
+			}
+			break; /* no event after waiting */
+		}
+		cur_ticks = rte_get_timer_cycles();
+	} while (cur_ticks < wait_time_ticks);
+
+	return num_frames;
+}
+
+static uint16_t
+dpaa_event_dequeue_intr(void *port,
+			struct rte_event *ev,
+			uint64_t timeout_ticks)
+{
+	return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
+}
+
 static void
 dpaa_event_dev_info_get(struct rte_eventdev *dev,
 			struct rte_event_dev_info *dev_info)
@@ -184,7 +328,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
 	dev_info->max_dequeue_timeout_ns =
 		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
 	dev_info->dequeue_timeout_ns =
-		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
 	dev_info->max_event_queues =
 		DPAA_EVENT_MAX_QUEUES;
 	dev_info->max_event_queue_flows =
@@ -230,15 +374,6 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
 	priv->event_dev_cfg = conf->event_dev_cfg;
 
-	/* Check dequeue timeout method is per dequeue or global */
-	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
-		/*
-		 * Use timeout value as given in dequeue operation.
-		 * So invalidating this timetout value.
-		 */
-		priv->dequeue_timeout_ns = 0;
-	}
-
 	ch_id = rte_malloc("dpaa-channels",
 			  sizeof(uint32_t) * priv->nb_event_queues,
 			  RTE_CACHE_LINE_SIZE);
@@ -260,24 +395,35 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	/* Lets prepare event ports */
 	memset(&priv->ports[0], 0,
 	      sizeof(struct dpaa_port) * priv->nb_event_ports);
+
+	/* Check dequeue timeout method is per dequeue or global */
 	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
-		for (i = 0; i < priv->nb_event_ports; i++) {
-			priv->ports[i].timeout =
-				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
-		}
-	} else if (priv->dequeue_timeout_ns == 0) {
-		for (i = 0; i < priv->nb_event_ports; i++) {
-			dpaa_event_dequeue_timeout_ticks(NULL,
-				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
-				&priv->ports[i].timeout);
-		}
+		/*
+		 * Use timeout value as given in dequeue operation.
+		 * So invalidating this timeout value.
+		 */
+		priv->dequeue_timeout_ns = 0;
+
+	} else if (conf->dequeue_timeout_ns == 0) {
+		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
 	} else {
-		for (i = 0; i < priv->nb_event_ports; i++) {
-			dpaa_event_dequeue_timeout_ticks(NULL,
-				priv->dequeue_timeout_ns,
-				&priv->ports[i].timeout);
+		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+	}
+
+	for (i = 0; i < priv->nb_event_ports; i++) {
+		if (priv->intr_mode) {
+			priv->ports[i].timeout_us =
+				priv->dequeue_timeout_ns/1000;
+		} else {
+			uint64_t cycles_per_second;
+
+			cycles_per_second = rte_get_timer_hz();
+			priv->ports[i].timeout_us =
+				(priv->dequeue_timeout_ns * cycles_per_second)
+					/ NS_PER_S;
 		}
 	}
+
 	/*
 	 * TODO: Currently portals are affined with threads. Maximum threads
 	 * can be created equals to number of lcore.
@@ -454,7 +600,8 @@ dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
 		event_queue->event_port = NULL;
 	}
 
-	event_port->num_linked_evq = event_port->num_linked_evq - i;
+	if (event_port->num_linked_evq)
+		event_port->num_linked_evq = event_port->num_linked_evq - i;
 
 	return (int)i;
 }
@@ -593,8 +740,44 @@ static struct rte_eventdev_ops dpaa_eventdev_ops = {
 	.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
 };
 
+static int flag_check_handler(__rte_unused const char *key,
+		const char *value, __rte_unused void *opaque)
+{
+	if (strcmp(value, "1"))
+		return -1;
+
+	return 0;
+}
+
+static int
+dpaa_event_check_flags(const char *params)
+{
+	struct rte_kvargs *kvlist;
+
+	if (params == NULL || params[0] == '\0')
+		return 0;
+
+	kvlist = rte_kvargs_parse(params, NULL);
+	if (kvlist == NULL)
+		return 0;
+
+	if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	/* INTR MODE is disabled when there's key-value pair: disable_intr = 1*/
+	if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
+				flag_check_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
+}
+
 static int
-dpaa_event_dev_create(const char *name)
+dpaa_event_dev_create(const char *name, const char *params)
 {
 	struct rte_eventdev *eventdev;
 	struct dpaa_eventdev *priv;
@@ -606,18 +789,27 @@ dpaa_event_dev_create(const char *name)
 		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
 		goto fail;
 	}
+	priv = eventdev->data->dev_private;
 
 	eventdev->dev_ops       = &dpaa_eventdev_ops;
 	eventdev->enqueue       = dpaa_event_enqueue;
 	eventdev->enqueue_burst = dpaa_event_enqueue_burst;
-	eventdev->dequeue       = dpaa_event_dequeue;
-	eventdev->dequeue_burst = dpaa_event_dequeue_burst;
+
+	if (dpaa_event_check_flags(params)) {
+		eventdev->dequeue	= dpaa_event_dequeue;
+		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
+	} else {
+		priv->intr_mode = 1;
+		eventdev->dev_ops->timeout_ticks =
+				dpaa_event_dequeue_timeout_ticks_intr;
+		eventdev->dequeue	= dpaa_event_dequeue_intr;
+		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
+	}
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
-	priv = eventdev->data->dev_private;
 	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
 
 	return 0;
@@ -629,11 +821,14 @@ static int
 dpaa_event_dev_probe(struct rte_vdev_device *vdev)
 {
 	const char *name;
+	const char *params;
 
 	name = rte_vdev_device_name(vdev);
 	DPAA_EVENTDEV_INFO("Initializing %s", name);
 
-	return dpaa_event_dev_create(name);
+	params = rte_vdev_device_args(vdev);
+
+	return dpaa_event_dev_create(name, params);
 }
 
 static int
@@ -653,3 +848,5 @@ static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
 };
 
 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
+		DISABLE_INTR_MODE "=<int>");
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
index 3994bd6..8134e6b 100644
--- a/drivers/event/dpaa/dpaa_eventdev.h
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -12,8 +12,8 @@
 
 #define EVENTDEV_NAME_DPAA_PMD		event_dpaa1
 
-#define DPAA_EVENT_MAX_PORTS			8
-#define DPAA_EVENT_MAX_QUEUES			16
+#define DPAA_EVENT_MAX_PORTS			4
+#define DPAA_EVENT_MAX_QUEUES			8
 #define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT	1
 #define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT	(UINT32_MAX - 1)
 #define DPAA_EVENT_MAX_QUEUE_FLOWS		2048
@@ -21,7 +21,7 @@
 #define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS	0
 #define DPAA_EVENT_MAX_EVENT_PORT		RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
 #define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH	8
-#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100UL
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100000UL
 #define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID	((uint64_t)-1)
 #define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH	1
 #define DPAA_EVENT_MAX_NUM_EVENTS		(INT32_MAX - 1)
@@ -54,7 +54,7 @@ struct dpaa_port {
 	struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
 	uint8_t num_linked_evq;
 	uint8_t is_port_linked;
-	uint64_t timeout;
+	uint64_t timeout_us;
 };
 
 struct dpaa_eventdev {
@@ -65,7 +65,7 @@ struct dpaa_eventdev {
 	uint8_t max_event_queues;
 	uint8_t nb_event_queues;
 	uint8_t nb_event_ports;
-	uint8_t resvd;
+	uint8_t intr_mode;
 	uint32_t nb_event_queue_flows;
 	uint32_t nb_event_port_dequeue_depth;
 	uint32_t nb_event_port_enqueue_depth;
-- 
2.7.4
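
As a usage note for the above: with this patch, interrupt mode becomes
the default, and a hypothetical invocation disabling it via the new
devarg would be:

    ./your_eventdev_application --vdev="event_dpaa1,disable_intr=1"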


* Re: [dpdk-dev] [PATCH v2 2/2] event/dpaa: add select based event support
  2018-09-25  7:02   ` [dpdk-dev] [PATCH v2 2/2] event/dpaa: add select based event support Hemant Agrawal
@ 2018-09-28 11:43     ` Jerin Jacob
  0 siblings, 0 replies; 7+ messages in thread
From: Jerin Jacob @ 2018-09-28 11:43 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev

-----Original Message-----
> Date: Tue, 25 Sep 2018 12:32:35 +0530
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> To: dev@dpdk.org
> CC: jerin.jacob@caviumnetworks.com
> Subject: [PATCH v2 2/2] event/dpaa: add select based event support
> X-Mailer: git-send-email 2.7.4
> 
> 
> This patch adds support for using the select call on the qman
> portal fd for timeout-based dequeue requests in eventdev.
> 
> If an event is available, the qman portal fd will be set and the
> function will wake up. If no event is available, it will wait
> only until the given timeout expires.
> 
> In interrupt mode, the timeout ticks are interpreted as microseconds.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
> Note: This patch has a dependency on the following patch series:
> http://mails.dpdk.org/archives/dev/2018-September/112433.html
> which is now part of the dpdk-next-net tree.

I will pull this patch when the dependent patch shows up on the master branch.

Currently it has the following build errors; I assume they are due to the
dependency patch.

/export/dpdk-next-eventdev/drivers/event/dpaa/dpaa_eventdev.c: In
function ‘drain_4_bytes’:
/export/dpdk-next-eventdev/drivers/event/dpaa/dpaa_eventdev.c:121:24:
error: implicit declaration of function ‘qman_thread_fd’; did you mean
‘qman_thread_irq’? [-Werror=implicit-function-declaration]
   ssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));
                        ^~~~~~~~~~~~~~
                        qman_thread_irq
/export/dpdk-next-eventdev/drivers/event/dpaa/dpaa_eventdev.c:121:24:
error: nested extern declaration of ‘qman_thread_fd’
[-Werror=nested-externs]
/export/dpdk-next-eventdev/drivers/event/dpaa/dpaa_eventdev.c: In
function ‘dpaa_event_dequeue_wait’:
/export/dpdk-next-eventdev/drivers/event/dpaa/dpaa_eventdev.c:148:2:
error: implicit declaration of function ‘qman_irqsource_add’; did you
mean ‘qman_reserve_fqid’? [-Werror=implicit-function-declaration]
  qman_irqsource_add(QM_PIRQ_DQRI);
  ^~~~~~~~~~~~~~~~~~
  qman_reserve_fqid
/export/dpdk-next-eventdev/drivers/event/dpaa/dpaa_eventdev.c:148:2:
error: nested extern declaration of ‘qman_irqsource_add’
[-Werror=nested-externs]
/export/dpdk-next-eventdev/drivers/event/dpaa/dpaa_eventdev.c:159:2:
error: implicit declaration of function ‘qman_irqsource_remove’; did you
mean ‘qman_reserve_pool’? [-Werror=implicit-function-declaration]
  qman_irqsource_remove(~0);
  ^~~~~~~~~~~~~~~~~~~~~
  qman_reserve_pool
/export/dpdk-next-eventdev/drivers/event/dpaa/dpaa_eventdev.c:159:2:
error: nested extern declaration of ‘qman_irqsource_remove’
[-Werror=nested-externs]

 
> 


* Re: [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros
  2018-09-25  7:02 ` [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros Hemant Agrawal
  2018-09-25  7:02   ` [dpdk-dev] [PATCH v2 2/2] event/dpaa: add select based event support Hemant Agrawal
@ 2018-10-04 14:40   ` Jerin Jacob
  1 sibling, 0 replies; 7+ messages in thread
From: Jerin Jacob @ 2018-10-04 14:40 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev

-----Original Message-----
> Date: Tue, 25 Sep 2018 12:32:34 +0530
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> To: dev@dpdk.org
> CC: jerin.jacob@caviumnetworks.com
> Subject: [PATCH v2 1/2] event/dpaa: remove duplicate log macros
> X-Mailer: git-send-email 2.7.4
> 
> Align and clean up the debug log prints
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
> Note: This patch has a dependency on the following patch series:
> http://mails.dpdk.org/archives/dev/2018-September/112433.html
> which is now part of the dpdk-next-net tree.

Series applied to dpdk-next-eventdev/master. Thanks.

> 
