DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH 1/5] event/dpaa2: fix mbuf assignment in atomic processing
@ 2018-08-30  6:03 Hemant Agrawal
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 2/5] event/dpaa2: rename evq info to dpaa2 eventq Hemant Agrawal
                   ` (4 more replies)
  0 siblings, 5 replies; 12+ messages in thread
From: Hemant Agrawal @ 2018-08-30  6:03 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, nipun.gupta, stable

Fixes: 7b6edb640b73 ("event/dpaa2: have separate structure to hold dqrr entries")
Cc: stable@dpdk.org

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index ea1e5cc..ea9d868 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -197,6 +197,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
 	ev->mbuf->seqn = dqrr_index + 1;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
 }
 
 static uint16_t
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 2/5] event/dpaa2: rename evq info to dpaa2 eventq
  2018-08-30  6:03 [dpdk-dev] [PATCH 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
@ 2018-08-30  6:03 ` Hemant Agrawal
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 3/5] event/dpaa2: enchance timeout handling Hemant Agrawal
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Hemant Agrawal @ 2018-08-30  6:03 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, nipun.gupta

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 8 ++++----
 drivers/event/dpaa2/dpaa2_eventdev.h | 5 +++--
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index ea9d868..c4064a4 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -58,7 +58,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			((struct dpaa2_io_portal_t *)port)->eventdev;
 	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
 	uint32_t queue_id = ev[0].queue_id;
-	struct evq_info_t *evq_info = &priv->evq_info[queue_id];
+	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
 	uint32_t fqid;
 	struct qbman_swp *swp;
 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
@@ -385,7 +385,7 @@ dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 			   const struct rte_event_queue_conf *queue_conf)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct evq_info_t *evq_info =
+	struct dpaa2_eventq *evq_info =
 		&priv->evq_info[queue_id];
 
 	EVENTDEV_INIT_FUNC_TRACE();
@@ -449,7 +449,7 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
 	struct dpaa2_io_portal_t *dpaa2_portal = port;
-	struct evq_info_t *evq_info;
+	struct dpaa2_eventq *evq_info;
 	int i;
 
 	EVENTDEV_INIT_FUNC_TRACE();
@@ -473,7 +473,7 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
 	struct dpaa2_io_portal_t *dpaa2_portal = port;
-	struct evq_info_t *evq_info;
+	struct dpaa2_eventq *evq_info;
 	uint8_t channel_index;
 	int ret, i, n;
 
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index 229f66a..d2f98c6 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -56,17 +56,18 @@ struct dpaa2_dpcon_dev {
 	uint8_t channel_index;
 };
 
-struct evq_info_t {
+struct dpaa2_eventq {
 	/* DPcon device */
 	struct dpaa2_dpcon_dev *dpcon;
 	/* Attached DPCI device */
 	struct dpaa2_dpci_dev *dpci;
 	/* Configuration provided by the user */
 	uint32_t event_queue_cfg;
+	uint32_t event_queue_id;
 };
 
 struct dpaa2_eventdev {
-	struct evq_info_t evq_info[DPAA2_EVENT_MAX_QUEUES];
+	struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
 	uint32_t dequeue_timeout_ns;
 	uint8_t max_event_queues;
 	uint8_t nb_event_queues;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 3/5] event/dpaa2: enchance timeout handling
  2018-08-30  6:03 [dpdk-dev] [PATCH 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 2/5] event/dpaa2: rename evq info to dpaa2 eventq Hemant Agrawal
@ 2018-08-30  6:03 ` Hemant Agrawal
  2018-09-10 13:37   ` Jerin Jacob
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 4/5] event/dpaa2: support Max event port value Hemant Agrawal
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Hemant Agrawal @ 2018-08-30  6:03 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, nipun.gupta

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 19 ++++++++++++++++---
 drivers/event/dpaa2/dpaa2_eventdev.h |  1 +
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index c4064a4..4b56e2e 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -284,7 +284,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 	dev_info->max_dequeue_timeout_ns =
 		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
 	dev_info->dequeue_timeout_ns =
-		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
 	dev_info->max_event_queues = priv->max_event_queues;
 	dev_info->max_event_queue_flows =
 		DPAA2_EVENT_MAX_QUEUE_FLOWS;
@@ -314,7 +314,6 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
-	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
 	priv->nb_event_queues = conf->nb_event_queues;
 	priv->nb_event_ports = conf->nb_event_ports;
 	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
@@ -322,6 +321,20 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
 	priv->event_dev_cfg = conf->event_dev_cfg;
 
+	/* Check dequeue timeout method is per dequeue or global */
+	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		/*
+		 * Use timeout value as given in dequeue operation.
+		 * So invalidating this timeout value.
+		 */
+		priv->dequeue_timeout_ns = 0;
+
+	} else if (conf->dequeue_timeout_ns == 0) {
+		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
+	} else {
+		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+	}
+
 	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
 			     dev->data->dev_id);
 	return 0;
@@ -516,7 +529,7 @@ static int
 dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 			     uint64_t *timeout_ticks)
 {
-	uint32_t scale = 1;
+	uint32_t scale = 1000*1000;
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index d2f98c6..8898024 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -21,6 +21,7 @@
 #define DPAA2_EVENT_MAX_QUEUES			16
 #define DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT		1
 #define DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT		(UINT32_MAX - 1)
+#define DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100UL
 #define DPAA2_EVENT_MAX_QUEUE_FLOWS		2048
 #define DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS	8
 #define DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS	0
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 4/5] event/dpaa2: support Max event port value
  2018-08-30  6:03 [dpdk-dev] [PATCH 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 2/5] event/dpaa2: rename evq info to dpaa2 eventq Hemant Agrawal
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 3/5] event/dpaa2: enchance timeout handling Hemant Agrawal
@ 2018-08-30  6:03 ` Hemant Agrawal
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 5/5] event/dpaa2: affining portal at runtime during I/O Hemant Agrawal
  2018-09-21 11:46 ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
  4 siblings, 0 replies; 12+ messages in thread
From: Hemant Agrawal @ 2018-08-30  6:03 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, nipun.gupta

This shall be the number of available cores.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 4b56e2e..456b446 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -293,6 +293,9 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 	dev_info->max_event_priority_levels =
 		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
 	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
+	/* we only support dpio upto number of cores*/
+	if (dev_info->max_event_ports > rte_lcore_count())
+		dev_info->max_event_ports = rte_lcore_count();
 	dev_info->max_event_port_dequeue_depth =
 		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
 	dev_info->max_event_port_enqueue_depth =
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 5/5] event/dpaa2: affining portal at runtime during I/O
  2018-08-30  6:03 [dpdk-dev] [PATCH 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
                   ` (2 preceding siblings ...)
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 4/5] event/dpaa2: support Max event port value Hemant Agrawal
@ 2018-08-30  6:03 ` Hemant Agrawal
  2018-09-21 11:46 ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
  4 siblings, 0 replies; 12+ messages in thread
From: Hemant Agrawal @ 2018-08-30  6:03 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, nipun.gupta, Sunil Kumar Kori

This patch restructures the code to have the QBMAN portal
affiliated at run time.
The device cleanup is also improved.

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 277 ++++++++++++++++++++++++-----------
 drivers/event/dpaa2/dpaa2_eventdev.h |   9 ++
 2 files changed, 198 insertions(+), 88 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 456b446..b4e0885 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -54,31 +54,60 @@ static uint16_t
 dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			     uint16_t nb_events)
 {
-	struct rte_eventdev *ev_dev =
-			((struct dpaa2_io_portal_t *)port)->eventdev;
-	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
+
+	struct dpaa2_port *dpaa2_portal = port;
+	struct dpaa2_dpio_dev *dpio_dev;
 	uint32_t queue_id = ev[0].queue_id;
-	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
+	struct dpaa2_eventq *evq_info;
 	uint32_t fqid;
 	struct qbman_swp *swp;
 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
 	uint32_t loop, frames_to_send;
 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
 	uint16_t num_tx = 0;
-	int ret;
-
-	RTE_SET_USED(port);
+	int i, ret;
+	uint8_t channel_index;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		/* Affine current thread context to a qman portal */
 		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
+		if (ret < 0) {
 			DPAA2_EVENTDEV_ERR("Failure in affining portal");
 			return 0;
 		}
 	}
-
+	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
+	dpio_dev = DPAA2_PER_LCORE_DPIO;
 	swp = DPAA2_PER_LCORE_PORTAL;
 
+	if (likely(dpaa2_portal->is_port_linked))
+		goto skip_linking;
+
+	/* Create mapping between portal and channel to receive packets */
+	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+		evq_info = &dpaa2_portal->evq_info[i];
+		if (!evq_info->event_port)
+			continue;
+
+		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+						      CMD_PRI_LOW,
+						      dpio_dev->token,
+						      evq_info->dpcon->dpcon_id,
+						      &channel_index);
+		if (ret < 0) {
+			DPAA2_EVENTDEV_ERR(
+				"Static dequeue config failed: err(%d)", ret);
+			goto err;
+		}
+
+		qbman_swp_push_set(swp, channel_index, 1);
+		evq_info->dpcon->channel_index = channel_index;
+	}
+	dpaa2_portal->is_port_linked = true;
+
+skip_linking:
+	evq_info = &dpaa2_portal->evq_info[queue_id];
+
 	while (nb_events) {
 		frames_to_send = (nb_events >> 3) ?
 			MAX_TX_RING_SLOTS : nb_events;
@@ -99,14 +128,14 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
 			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
 
-			if (event->mbuf->seqn) {
+			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
+				&& event->mbuf->seqn) {
 				uint8_t dqrr_index = event->mbuf->seqn - 1;
 
 				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
 						      dqrr_index, 0);
 				DPAA2_PER_LCORE_DQRR_SIZE--;
-				DPAA2_PER_LCORE_DQRR_HELD &=
-					~(1 << dqrr_index);
+				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
 			}
 
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
@@ -116,7 +145,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			 * to avoid copy
 			 */
 			struct rte_event *ev_temp = rte_malloc(NULL,
-				sizeof(struct rte_event), 0);
+						sizeof(struct rte_event), 0);
 
 			if (!ev_temp) {
 				if (!loop)
@@ -143,6 +172,18 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 	}
 
 	return num_tx;
+err:
+	for (int n = 0; n < i; n++) {
+		evq_info = &dpaa2_portal->evq_info[n];
+		if (!evq_info->event_port)
+			continue;
+		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+						dpio_dev->token,
+						evq_info->dpcon->dpcon_id);
+	}
+	return 0;
+
 }
 
 static uint16_t
@@ -205,22 +246,53 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 			     uint16_t nb_events, uint64_t timeout_ticks)
 {
 	const struct qbman_result *dq;
+	struct dpaa2_dpio_dev *dpio_dev = NULL;
+	struct dpaa2_port *dpaa2_portal = port;
+	struct dpaa2_eventq *evq_info;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct dpaa2_queue *rxq;
-	int num_pkts = 0, ret, i = 0;
-
-	RTE_SET_USED(port);
+	int num_pkts = 0, ret, i = 0, n;
+	uint8_t channel_index;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		/* Affine current thread context to a qman portal */
 		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
+		if (ret < 0) {
 			DPAA2_EVENTDEV_ERR("Failure in affining portal");
 			return 0;
 		}
 	}
+
+	dpio_dev = DPAA2_PER_LCORE_DPIO;
 	swp = DPAA2_PER_LCORE_PORTAL;
 
+	if (likely(dpaa2_portal->is_port_linked))
+		goto skip_linking;
+
+	/* Create mapping between portal and channel to receive packets */
+	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+		evq_info = &dpaa2_portal->evq_info[i];
+		if (!evq_info->event_port)
+			continue;
+
+		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+						      CMD_PRI_LOW,
+						      dpio_dev->token,
+						      evq_info->dpcon->dpcon_id,
+						      &channel_index);
+		if (ret < 0) {
+			DPAA2_EVENTDEV_ERR(
+				"Static dequeue config failed: err(%d)", ret);
+			goto err;
+		}
+
+		qbman_swp_push_set(swp, channel_index, 1);
+		evq_info->dpcon->channel_index = channel_index;
+	}
+	dpaa2_portal->is_port_linked = true;
+
+skip_linking:
 	/* Check if there are atomic contexts to be released */
 	while (DPAA2_PER_LCORE_DQRR_SIZE) {
 		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
@@ -259,6 +331,18 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 	} while (num_pkts < nb_events);
 
 	return num_pkts;
+err:
+	for (n = 0; n < i; n++) {
+		evq_info = &dpaa2_portal->evq_info[n];
+		if (!evq_info->event_port)
+			continue;
+
+		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+							dpio_dev->token,
+						evq_info->dpcon->dpcon_id);
+	}
+	return 0;
 }
 
 static uint16_t
@@ -387,31 +471,39 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 }
 
-static void
-dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
-{
-	EVENTDEV_INIT_FUNC_TRACE();
-
-	RTE_SET_USED(dev);
-	RTE_SET_USED(queue_id);
-}
-
 static int
 dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 			   const struct rte_event_queue_conf *queue_conf)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct dpaa2_eventq *evq_info =
-		&priv->evq_info[queue_id];
+	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
+	switch (queue_conf->schedule_type) {
+	case RTE_SCHED_TYPE_PARALLEL:
+	case RTE_SCHED_TYPE_ATOMIC:
+		break;
+	case RTE_SCHED_TYPE_ORDERED:
+		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
+		return -1;
+	}
 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+	evq_info->event_queue_id = queue_id;
 
 	return 0;
 }
 
 static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queue_id);
+}
+
+static void
 dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 			     struct rte_event_port_conf *port_conf)
 {
@@ -419,7 +511,6 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(port_id);
-	RTE_SET_USED(port_conf);
 
 	port_conf->new_event_threshold =
 		DPAA2_EVENT_MAX_NUM_EVENTS;
@@ -430,56 +521,44 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->disable_implicit_release = 0;
 }
 
-static void
-dpaa2_eventdev_port_release(void *port)
-{
-	EVENTDEV_INIT_FUNC_TRACE();
-
-	RTE_SET_USED(port);
-}
-
 static int
 dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 			  const struct rte_event_port_conf *port_conf)
 {
+	char event_port_name[32];
+	struct dpaa2_port *portal;
+
 	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(port_conf);
 
-	if (!dpaa2_io_portal[port_id].dpio_dev) {
-		dpaa2_io_portal[port_id].dpio_dev =
-				dpaa2_get_qbman_swp(port_id);
-		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
-		if (!dpaa2_io_portal[port_id].dpio_dev)
-			return -1;
+	sprintf(event_port_name, "event-port-%d", port_id);
+	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
+	if (!portal) {
+		DPAA2_EVENTDEV_ERR("Memory allocation failure");
+		return -ENOMEM;
 	}
 
-	dpaa2_io_portal[port_id].eventdev = dev;
-	dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
+	memset(portal, 0, sizeof(struct dpaa2_port));
+	dev->data->ports[port_id] = portal;
 	return 0;
 }
 
-static int
-dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
-			   uint8_t queues[], uint16_t nb_unlinks)
+static void
+dpaa2_eventdev_port_release(void *port)
 {
-	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct dpaa2_io_portal_t *dpaa2_portal = port;
-	struct dpaa2_eventq *evq_info;
-	int i;
+	struct dpaa2_port *portal = port;
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
-	for (i = 0; i < nb_unlinks; i++) {
-		evq_info = &priv->evq_info[queues[i]];
-		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
-				   evq_info->dpcon->channel_index, 0);
-		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
-					0, dpaa2_portal->dpio_dev->token,
-			evq_info->dpcon->dpcon_id);
-	}
+	/* TODO: Cleanup is required when ports are in linked state. */
+	if (portal->is_port_linked)
+		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");
 
-	return (int)nb_unlinks;
+	if (portal)
+		rte_free(portal);
+
+	portal = NULL;
 }
 
 static int
@@ -488,46 +567,66 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
 			uint16_t nb_links)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct dpaa2_io_portal_t *dpaa2_portal = port;
+	struct dpaa2_port *dpaa2_portal = port;
 	struct dpaa2_eventq *evq_info;
-	uint8_t channel_index;
-	int ret, i, n;
+	uint16_t i;
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
+	RTE_SET_USED(priorities);
+
 	for (i = 0; i < nb_links; i++) {
 		evq_info = &priv->evq_info[queues[i]];
+		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
+			   sizeof(struct dpaa2_eventq));
+		dpaa2_portal->evq_info[queues[i]].event_port = port;
+		dpaa2_portal->num_linked_evq++;
+	}
 
-		ret = dpio_add_static_dequeue_channel(
-			dpaa2_portal->dpio_dev->dpio,
-			CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
-			evq_info->dpcon->dpcon_id, &channel_index);
-		if (ret < 0) {
-			DPAA2_EVENTDEV_ERR(
-				"Static dequeue config failed: err(%d)", ret);
-			goto err;
-		}
+	return (int)nb_links;
+}
 
-		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
-				   channel_index, 1);
-		evq_info->dpcon->channel_index = channel_index;
-	}
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+			   uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct dpaa2_port *dpaa2_portal = port;
+	int i;
+	struct dpaa2_dpio_dev *dpio_dev = NULL;
+	struct dpaa2_eventq *evq_info;
+	struct qbman_swp *swp;
 
-	RTE_SET_USED(priorities);
+	EVENTDEV_INIT_FUNC_TRACE();
 
-	return (int)nb_links;
-err:
-	for (n = 0; n < i; n++) {
-		evq_info = &priv->evq_info[queues[n]];
-		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
-				   evq_info->dpcon->channel_index, 0);
-		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
-					0, dpaa2_portal->dpio_dev->token,
-			evq_info->dpcon->dpcon_id);
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queues);
+
+	for (i = 0; i < nb_unlinks; i++) {
+		evq_info = &dpaa2_portal->evq_info[queues[i]];
+
+		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
+			/* todo dpaa2_portal shall have dpio_dev-no per lcore*/
+			dpio_dev = DPAA2_PER_LCORE_DPIO;
+			swp = DPAA2_PER_LCORE_PORTAL;
+
+			qbman_swp_push_set(swp,
+					evq_info->dpcon->channel_index, 0);
+			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+						dpio_dev->token,
+						evq_info->dpcon->dpcon_id);
+		}
+		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
+		if (dpaa2_portal->num_linked_evq)
+			dpaa2_portal->num_linked_evq--;
 	}
-	return ret;
+
+	if (!dpaa2_portal->num_linked_evq)
+		dpaa2_portal->is_port_linked = false;
+
+	return (int)nb_unlinks;
 }
 
+
 static int
 dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 			     uint64_t *timeout_ticks)
@@ -806,6 +905,8 @@ dpaa2_eventdev_create(const char *name)
 		priv->max_event_queues++;
 	} while (dpcon_dev && dpci_dev);
 
+	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);
+
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index 8898024..720e0c6 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -62,11 +62,20 @@ struct dpaa2_eventq {
 	struct dpaa2_dpcon_dev *dpcon;
 	/* Attached DPCI device */
 	struct dpaa2_dpci_dev *dpci;
+	/* Mapped event port */
+	struct dpaa2_io_portal_t *event_port;
 	/* Configuration provided by the user */
 	uint32_t event_queue_cfg;
 	uint32_t event_queue_id;
 };
 
+struct dpaa2_port {
+	struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
+	uint8_t num_linked_evq;
+	uint8_t is_port_linked;
+	uint64_t timeout_us;
+};
+
 struct dpaa2_eventdev {
 	struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
 	uint32_t dequeue_timeout_ns;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [dpdk-dev] [PATCH 3/5] event/dpaa2: enchance timeout handling
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 3/5] event/dpaa2: enchance timeout handling Hemant Agrawal
@ 2018-09-10 13:37   ` Jerin Jacob
  0 siblings, 0 replies; 12+ messages in thread
From: Jerin Jacob @ 2018-09-10 13:37 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev, nipun.gupta

-----Original Message-----
> Date: Thu, 30 Aug 2018 11:33:57 +0530
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> To: dev@dpdk.org
> CC: jerin.jacob@caviumnetworks.com, nipun.gupta@nxp.com
> Subject: [PATCH 3/5] event/dpaa2: enchance timeout handling
> X-Mailer: git-send-email 2.7.4
> 

Missing git commit log description about timeout handling enhancement.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing
  2018-08-30  6:03 [dpdk-dev] [PATCH 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
                   ` (3 preceding siblings ...)
  2018-08-30  6:03 ` [dpdk-dev] [PATCH 5/5] event/dpaa2: affining portal at runtime during I/O Hemant Agrawal
@ 2018-09-21 11:46 ` Hemant Agrawal
  2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 2/5] event/dpaa2: rename evq info to dpaa2 eventq Hemant Agrawal
                     ` (4 more replies)
  4 siblings, 5 replies; 12+ messages in thread
From: Hemant Agrawal @ 2018-09-21 11:46 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, Hemant Agrawal, stable

Fixes: 7b6edb640b73 ("event/dpaa2: have separate structure to hold dqrr entries")
Cc: stable@dpdk.org

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index ea1e5cc..ea9d868 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -197,6 +197,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
 	ev->mbuf->seqn = dqrr_index + 1;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
 }
 
 static uint16_t
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH v2 2/5] event/dpaa2: rename evq info to dpaa2 eventq
  2018-09-21 11:46 ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
@ 2018-09-21 11:46   ` Hemant Agrawal
  2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 3/5] event/dpaa2: enchance timeout handling Hemant Agrawal
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Hemant Agrawal @ 2018-09-21 11:46 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, Hemant Agrawal

This is to keep the dpaa2 driver aligned with dpaa driver.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 8 ++++----
 drivers/event/dpaa2/dpaa2_eventdev.h | 5 +++--
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index ea9d868..c4064a4 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -58,7 +58,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			((struct dpaa2_io_portal_t *)port)->eventdev;
 	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
 	uint32_t queue_id = ev[0].queue_id;
-	struct evq_info_t *evq_info = &priv->evq_info[queue_id];
+	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
 	uint32_t fqid;
 	struct qbman_swp *swp;
 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
@@ -385,7 +385,7 @@ dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 			   const struct rte_event_queue_conf *queue_conf)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct evq_info_t *evq_info =
+	struct dpaa2_eventq *evq_info =
 		&priv->evq_info[queue_id];
 
 	EVENTDEV_INIT_FUNC_TRACE();
@@ -449,7 +449,7 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
 	struct dpaa2_io_portal_t *dpaa2_portal = port;
-	struct evq_info_t *evq_info;
+	struct dpaa2_eventq *evq_info;
 	int i;
 
 	EVENTDEV_INIT_FUNC_TRACE();
@@ -473,7 +473,7 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
 	struct dpaa2_io_portal_t *dpaa2_portal = port;
-	struct evq_info_t *evq_info;
+	struct dpaa2_eventq *evq_info;
 	uint8_t channel_index;
 	int ret, i, n;
 
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index 229f66a..d2f98c6 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -56,17 +56,18 @@ struct dpaa2_dpcon_dev {
 	uint8_t channel_index;
 };
 
-struct evq_info_t {
+struct dpaa2_eventq {
 	/* DPcon device */
 	struct dpaa2_dpcon_dev *dpcon;
 	/* Attached DPCI device */
 	struct dpaa2_dpci_dev *dpci;
 	/* Configuration provided by the user */
 	uint32_t event_queue_cfg;
+	uint32_t event_queue_id;
 };
 
 struct dpaa2_eventdev {
-	struct evq_info_t evq_info[DPAA2_EVENT_MAX_QUEUES];
+	struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
 	uint32_t dequeue_timeout_ns;
 	uint8_t max_event_queues;
 	uint8_t nb_event_queues;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH v2 3/5] event/dpaa2: enchance timeout handling
  2018-09-21 11:46 ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
  2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 2/5] event/dpaa2: rename evq info to dpaa2 eventq Hemant Agrawal
@ 2018-09-21 11:46   ` Hemant Agrawal
  2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 4/5] event/dpaa2: support Max event port value Hemant Agrawal
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Hemant Agrawal @ 2018-09-21 11:46 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, Hemant Agrawal

This patch enhances:
1. Configuring the dequeue timeout value as per the given
   method: per dequeue, global, or default.
2. The timeout values were being mixed as ns or ms timeouts;
   now the values are stored as ns and the scale is in ms.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
v2: added description

 drivers/event/dpaa2/dpaa2_eventdev.c | 19 ++++++++++++++++---
 drivers/event/dpaa2/dpaa2_eventdev.h |  1 +
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index c4064a4..4b56e2e 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -284,7 +284,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 	dev_info->max_dequeue_timeout_ns =
 		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
 	dev_info->dequeue_timeout_ns =
-		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
 	dev_info->max_event_queues = priv->max_event_queues;
 	dev_info->max_event_queue_flows =
 		DPAA2_EVENT_MAX_QUEUE_FLOWS;
@@ -314,7 +314,6 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
-	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
 	priv->nb_event_queues = conf->nb_event_queues;
 	priv->nb_event_ports = conf->nb_event_ports;
 	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
@@ -322,6 +321,20 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
 	priv->event_dev_cfg = conf->event_dev_cfg;
 
+	/* Check dequeue timeout method is per dequeue or global */
+	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		/*
+		 * Use timeout value as given in dequeue operation.
+		 * So invalidating this timeout value.
+		 */
+		priv->dequeue_timeout_ns = 0;
+
+	} else if (conf->dequeue_timeout_ns == 0) {
+		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
+	} else {
+		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+	}
+
 	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
 			     dev->data->dev_id);
 	return 0;
@@ -516,7 +529,7 @@ static int
 dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 			     uint64_t *timeout_ticks)
 {
-	uint32_t scale = 1;
+	uint32_t scale = 1000*1000;
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index d2f98c6..8898024 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -21,6 +21,7 @@
 #define DPAA2_EVENT_MAX_QUEUES			16
 #define DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT		1
 #define DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT		(UINT32_MAX - 1)
+#define DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100UL
 #define DPAA2_EVENT_MAX_QUEUE_FLOWS		2048
 #define DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS	8
 #define DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS	0
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH v2 4/5] event/dpaa2: support Max event port value
  2018-09-21 11:46 ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
  2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 2/5] event/dpaa2: rename evq info to dpaa2 eventq Hemant Agrawal
  2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 3/5] event/dpaa2: enchance timeout handling Hemant Agrawal
@ 2018-09-21 11:46   ` Hemant Agrawal
  2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 5/5] event/dpaa2: affining portal at runtime during I/O Hemant Agrawal
  2018-09-23  7:52   ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Jerin Jacob
  4 siblings, 0 replies; 12+ messages in thread
From: Hemant Agrawal @ 2018-09-21 11:46 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, Hemant Agrawal

This shall be the number of available cores.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 4b56e2e..456b446 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -293,6 +293,9 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 	dev_info->max_event_priority_levels =
 		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
 	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
+	/* we only support dpio upto number of cores*/
+	if (dev_info->max_event_ports > rte_lcore_count())
+		dev_info->max_event_ports = rte_lcore_count();
 	dev_info->max_event_port_dequeue_depth =
 		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
 	dev_info->max_event_port_enqueue_depth =
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH v2 5/5] event/dpaa2: affining portal at runtime during I/O
  2018-09-21 11:46 ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
                     ` (2 preceding siblings ...)
  2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 4/5] event/dpaa2: support Max event port value Hemant Agrawal
@ 2018-09-21 11:46   ` Hemant Agrawal
  2018-09-23  7:52   ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Jerin Jacob
  4 siblings, 0 replies; 12+ messages in thread
From: Hemant Agrawal @ 2018-09-21 11:46 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, Hemant Agrawal, Sunil Kumar Kori

This patch restructures the code to have the QBMAN portal
affiliated at run time on a per-lcore basis.
The device cleanup is also improved.

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
v5: fixed a compilation error on x86

 drivers/event/dpaa2/dpaa2_eventdev.c | 277 ++++++++++++++++++++++++-----------
 drivers/event/dpaa2/dpaa2_eventdev.h |   9 ++
 2 files changed, 198 insertions(+), 88 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 456b446..24df8d7 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -54,31 +54,60 @@ static uint16_t
 dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			     uint16_t nb_events)
 {
-	struct rte_eventdev *ev_dev =
-			((struct dpaa2_io_portal_t *)port)->eventdev;
-	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
+
+	struct dpaa2_port *dpaa2_portal = port;
+	struct dpaa2_dpio_dev *dpio_dev;
 	uint32_t queue_id = ev[0].queue_id;
-	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
+	struct dpaa2_eventq *evq_info;
 	uint32_t fqid;
 	struct qbman_swp *swp;
 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
 	uint32_t loop, frames_to_send;
 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
 	uint16_t num_tx = 0;
-	int ret;
-
-	RTE_SET_USED(port);
+	int i, n, ret;
+	uint8_t channel_index;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		/* Affine current thread context to a qman portal */
 		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
+		if (ret < 0) {
 			DPAA2_EVENTDEV_ERR("Failure in affining portal");
 			return 0;
 		}
 	}
-
+	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
+	dpio_dev = DPAA2_PER_LCORE_DPIO;
 	swp = DPAA2_PER_LCORE_PORTAL;
 
+	if (likely(dpaa2_portal->is_port_linked))
+		goto skip_linking;
+
+	/* Create mapping between portal and channel to receive packets */
+	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+		evq_info = &dpaa2_portal->evq_info[i];
+		if (!evq_info->event_port)
+			continue;
+
+		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+						      CMD_PRI_LOW,
+						      dpio_dev->token,
+						      evq_info->dpcon->dpcon_id,
+						      &channel_index);
+		if (ret < 0) {
+			DPAA2_EVENTDEV_ERR(
+				"Static dequeue config failed: err(%d)", ret);
+			goto err;
+		}
+
+		qbman_swp_push_set(swp, channel_index, 1);
+		evq_info->dpcon->channel_index = channel_index;
+	}
+	dpaa2_portal->is_port_linked = true;
+
+skip_linking:
+	evq_info = &dpaa2_portal->evq_info[queue_id];
+
 	while (nb_events) {
 		frames_to_send = (nb_events >> 3) ?
 			MAX_TX_RING_SLOTS : nb_events;
@@ -99,14 +128,14 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
 			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
 
-			if (event->mbuf->seqn) {
+			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
+				&& event->mbuf->seqn) {
 				uint8_t dqrr_index = event->mbuf->seqn - 1;
 
 				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
 						      dqrr_index, 0);
 				DPAA2_PER_LCORE_DQRR_SIZE--;
-				DPAA2_PER_LCORE_DQRR_HELD &=
-					~(1 << dqrr_index);
+				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
 			}
 
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
@@ -116,7 +145,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			 * to avoid copy
 			 */
 			struct rte_event *ev_temp = rte_malloc(NULL,
-				sizeof(struct rte_event), 0);
+						sizeof(struct rte_event), 0);
 
 			if (!ev_temp) {
 				if (!loop)
@@ -143,6 +172,18 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 	}
 
 	return num_tx;
+err:
+	for (n = 0; n < i; n++) {
+		evq_info = &dpaa2_portal->evq_info[n];
+		if (!evq_info->event_port)
+			continue;
+		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+						dpio_dev->token,
+						evq_info->dpcon->dpcon_id);
+	}
+	return 0;
+
 }
 
 static uint16_t
@@ -205,22 +246,53 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 			     uint16_t nb_events, uint64_t timeout_ticks)
 {
 	const struct qbman_result *dq;
+	struct dpaa2_dpio_dev *dpio_dev = NULL;
+	struct dpaa2_port *dpaa2_portal = port;
+	struct dpaa2_eventq *evq_info;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct dpaa2_queue *rxq;
-	int num_pkts = 0, ret, i = 0;
-
-	RTE_SET_USED(port);
+	int num_pkts = 0, ret, i = 0, n;
+	uint8_t channel_index;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		/* Affine current thread context to a qman portal */
 		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
+		if (ret < 0) {
 			DPAA2_EVENTDEV_ERR("Failure in affining portal");
 			return 0;
 		}
 	}
+
+	dpio_dev = DPAA2_PER_LCORE_DPIO;
 	swp = DPAA2_PER_LCORE_PORTAL;
 
+	if (likely(dpaa2_portal->is_port_linked))
+		goto skip_linking;
+
+	/* Create mapping between portal and channel to receive packets */
+	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+		evq_info = &dpaa2_portal->evq_info[i];
+		if (!evq_info->event_port)
+			continue;
+
+		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+						      CMD_PRI_LOW,
+						      dpio_dev->token,
+						      evq_info->dpcon->dpcon_id,
+						      &channel_index);
+		if (ret < 0) {
+			DPAA2_EVENTDEV_ERR(
+				"Static dequeue config failed: err(%d)", ret);
+			goto err;
+		}
+
+		qbman_swp_push_set(swp, channel_index, 1);
+		evq_info->dpcon->channel_index = channel_index;
+	}
+	dpaa2_portal->is_port_linked = true;
+
+skip_linking:
 	/* Check if there are atomic contexts to be released */
 	while (DPAA2_PER_LCORE_DQRR_SIZE) {
 		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
@@ -259,6 +331,18 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 	} while (num_pkts < nb_events);
 
 	return num_pkts;
+err:
+	for (n = 0; n < i; n++) {
+		evq_info = &dpaa2_portal->evq_info[n];
+		if (!evq_info->event_port)
+			continue;
+
+		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+							dpio_dev->token,
+						evq_info->dpcon->dpcon_id);
+	}
+	return 0;
 }
 
 static uint16_t
@@ -387,31 +471,39 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 }
 
-static void
-dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
-{
-	EVENTDEV_INIT_FUNC_TRACE();
-
-	RTE_SET_USED(dev);
-	RTE_SET_USED(queue_id);
-}
-
 static int
 dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 			   const struct rte_event_queue_conf *queue_conf)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct dpaa2_eventq *evq_info =
-		&priv->evq_info[queue_id];
+	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
+	switch (queue_conf->schedule_type) {
+	case RTE_SCHED_TYPE_PARALLEL:
+	case RTE_SCHED_TYPE_ATOMIC:
+		break;
+	case RTE_SCHED_TYPE_ORDERED:
+		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
+		return -1;
+	}
 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+	evq_info->event_queue_id = queue_id;
 
 	return 0;
 }
 
 static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queue_id);
+}
+
+static void
 dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 			     struct rte_event_port_conf *port_conf)
 {
@@ -419,7 +511,6 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(port_id);
-	RTE_SET_USED(port_conf);
 
 	port_conf->new_event_threshold =
 		DPAA2_EVENT_MAX_NUM_EVENTS;
@@ -430,56 +521,44 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->disable_implicit_release = 0;
 }
 
-static void
-dpaa2_eventdev_port_release(void *port)
-{
-	EVENTDEV_INIT_FUNC_TRACE();
-
-	RTE_SET_USED(port);
-}
-
 static int
 dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 			  const struct rte_event_port_conf *port_conf)
 {
+	char event_port_name[32];
+	struct dpaa2_port *portal;
+
 	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(port_conf);
 
-	if (!dpaa2_io_portal[port_id].dpio_dev) {
-		dpaa2_io_portal[port_id].dpio_dev =
-				dpaa2_get_qbman_swp(port_id);
-		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
-		if (!dpaa2_io_portal[port_id].dpio_dev)
-			return -1;
+	sprintf(event_port_name, "event-port-%d", port_id);
+	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
+	if (!portal) {
+		DPAA2_EVENTDEV_ERR("Memory allocation failure");
+		return -ENOMEM;
 	}
 
-	dpaa2_io_portal[port_id].eventdev = dev;
-	dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
+	memset(portal, 0, sizeof(struct dpaa2_port));
+	dev->data->ports[port_id] = portal;
 	return 0;
 }
 
-static int
-dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
-			   uint8_t queues[], uint16_t nb_unlinks)
+static void
+dpaa2_eventdev_port_release(void *port)
 {
-	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct dpaa2_io_portal_t *dpaa2_portal = port;
-	struct dpaa2_eventq *evq_info;
-	int i;
+	struct dpaa2_port *portal = port;
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
-	for (i = 0; i < nb_unlinks; i++) {
-		evq_info = &priv->evq_info[queues[i]];
-		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
-				   evq_info->dpcon->channel_index, 0);
-		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
-					0, dpaa2_portal->dpio_dev->token,
-			evq_info->dpcon->dpcon_id);
-	}
+	/* TODO: Cleanup is required when ports are in linked state. */
+	if (portal->is_port_linked)
+		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");
 
-	return (int)nb_unlinks;
+	if (portal)
+		rte_free(portal);
+
+	portal = NULL;
 }
 
 static int
@@ -488,46 +567,66 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
 			uint16_t nb_links)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct dpaa2_io_portal_t *dpaa2_portal = port;
+	struct dpaa2_port *dpaa2_portal = port;
 	struct dpaa2_eventq *evq_info;
-	uint8_t channel_index;
-	int ret, i, n;
+	uint16_t i;
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
+	RTE_SET_USED(priorities);
+
 	for (i = 0; i < nb_links; i++) {
 		evq_info = &priv->evq_info[queues[i]];
+		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
+			   sizeof(struct dpaa2_eventq));
+		dpaa2_portal->evq_info[queues[i]].event_port = port;
+		dpaa2_portal->num_linked_evq++;
+	}
 
-		ret = dpio_add_static_dequeue_channel(
-			dpaa2_portal->dpio_dev->dpio,
-			CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
-			evq_info->dpcon->dpcon_id, &channel_index);
-		if (ret < 0) {
-			DPAA2_EVENTDEV_ERR(
-				"Static dequeue config failed: err(%d)", ret);
-			goto err;
-		}
+	return (int)nb_links;
+}
 
-		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
-				   channel_index, 1);
-		evq_info->dpcon->channel_index = channel_index;
-	}
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+			   uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct dpaa2_port *dpaa2_portal = port;
+	int i;
+	struct dpaa2_dpio_dev *dpio_dev = NULL;
+	struct dpaa2_eventq *evq_info;
+	struct qbman_swp *swp;
 
-	RTE_SET_USED(priorities);
+	EVENTDEV_INIT_FUNC_TRACE();
 
-	return (int)nb_links;
-err:
-	for (n = 0; n < i; n++) {
-		evq_info = &priv->evq_info[queues[n]];
-		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
-				   evq_info->dpcon->channel_index, 0);
-		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
-					0, dpaa2_portal->dpio_dev->token,
-			evq_info->dpcon->dpcon_id);
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queues);
+
+	for (i = 0; i < nb_unlinks; i++) {
+		evq_info = &dpaa2_portal->evq_info[queues[i]];
+
+		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
+			/* todo dpaa2_portal shall have dpio_dev-no per lcore*/
+			dpio_dev = DPAA2_PER_LCORE_DPIO;
+			swp = DPAA2_PER_LCORE_PORTAL;
+
+			qbman_swp_push_set(swp,
+					evq_info->dpcon->channel_index, 0);
+			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+						dpio_dev->token,
+						evq_info->dpcon->dpcon_id);
+		}
+		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
+		if (dpaa2_portal->num_linked_evq)
+			dpaa2_portal->num_linked_evq--;
 	}
-	return ret;
+
+	if (!dpaa2_portal->num_linked_evq)
+		dpaa2_portal->is_port_linked = false;
+
+	return (int)nb_unlinks;
 }
 
+
 static int
 dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 			     uint64_t *timeout_ticks)
@@ -806,6 +905,8 @@ dpaa2_eventdev_create(const char *name)
 		priv->max_event_queues++;
 	} while (dpcon_dev && dpci_dev);
 
+	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);
+
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index 8898024..720e0c6 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -62,11 +62,20 @@ struct dpaa2_eventq {
 	struct dpaa2_dpcon_dev *dpcon;
 	/* Attached DPCI device */
 	struct dpaa2_dpci_dev *dpci;
+	/* Mapped event port */
+	struct dpaa2_io_portal_t *event_port;
 	/* Configuration provided by the user */
 	uint32_t event_queue_cfg;
 	uint32_t event_queue_id;
 };
 
+struct dpaa2_port {
+	struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
+	uint8_t num_linked_evq;
+	uint8_t is_port_linked;
+	uint64_t timeout_us;
+};
+
 struct dpaa2_eventdev {
 	struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
 	uint32_t dequeue_timeout_ns;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing
  2018-09-21 11:46 ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
                     ` (3 preceding siblings ...)
  2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 5/5] event/dpaa2: affining portal at runtime during I/O Hemant Agrawal
@ 2018-09-23  7:52   ` Jerin Jacob
  4 siblings, 0 replies; 12+ messages in thread
From: Jerin Jacob @ 2018-09-23  7:52 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev, stable

-----Original Message-----
> Date: Fri, 21 Sep 2018 17:16:02 +0530
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> To: dev@dpdk.org
> CC: jerin.jacob@caviumnetworks.com, Hemant Agrawal
>  <hemant.agrawal@nxp.com>, stable@dpdk.org
> Subject: [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic
>  processing
> X-Mailer: git-send-email 2.7.4
> 
> 
> Fixes: 7b6edb640b73 ("event/dpaa2: have separate structure to hold dqrr entries")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

Applied the series to dpdk-next-eventdev/master with minor git comment changes. Thanks.

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2018-09-23  8:03 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-08-30  6:03 [dpdk-dev] [PATCH 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
2018-08-30  6:03 ` [dpdk-dev] [PATCH 2/5] event/dpaa2: rename evq info to dpaa2 eventq Hemant Agrawal
2018-08-30  6:03 ` [dpdk-dev] [PATCH 3/5] event/dpaa2: enchance timeout handling Hemant Agrawal
2018-09-10 13:37   ` Jerin Jacob
2018-08-30  6:03 ` [dpdk-dev] [PATCH 4/5] event/dpaa2: support Max event port value Hemant Agrawal
2018-08-30  6:03 ` [dpdk-dev] [PATCH 5/5] event/dpaa2: affining portal at runtime during I/O Hemant Agrawal
2018-09-21 11:46 ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Hemant Agrawal
2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 2/5] event/dpaa2: rename evq info to dpaa2 eventq Hemant Agrawal
2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 3/5] event/dpaa2: enchance timeout handling Hemant Agrawal
2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 4/5] event/dpaa2: support Max event port value Hemant Agrawal
2018-09-21 11:46   ` [dpdk-dev] [PATCH v2 5/5] event/dpaa2: affining portal at runtime during I/O Hemant Agrawal
2018-09-23  7:52   ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix mbuf assignment in atomic processing Jerin Jacob

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).