From: Amit Prakash Shukla <amitprakashs@marvell.com>
To: Amit Prakash Shukla <amitprakashs@marvell.com>,
	Jerin Jacob <jerinj@marvell.com>
Cc: <dev@dpdk.org>, <fengchengwen@huawei.com>,
	<kevin.laatz@intel.com>, <bruce.richardson@intel.com>,
	<conor.walsh@intel.com>, <vattunuru@marvell.com>,
	<g.singh@nxp.com>, <sachin.saxena@oss.nxp.com>,
	<hemant.agrawal@nxp.com>, <cheng1.jiang@intel.com>,
	<ndabilpuram@marvell.com>, <anoobj@marvell.com>,
	<mb@smartsharesystems.com>
Subject: [PATCH v3 05/12] eventdev: add support for DMA adapter service function
Date: Sat, 23 Sep 2023 19:04:42 +0530	[thread overview]
Message-ID: <20230923133449.3780841-6-amitprakashs@marvell.com> (raw)
In-Reply-To: <20230923133449.3780841-1-amitprakashs@marvell.com>

Added support for the DMA adapter service function for event devices.
Events are enqueued to and dequeued from the eventdev and the DMA
device based on the adapter mode and the supported HW capabilities.

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
 lib/eventdev/rte_event_dma_adapter.c | 589 +++++++++++++++++++++++++++
 1 file changed, 589 insertions(+)
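
Usage sketch (for reviewers, not part of this patch): once the service ID
get API added later in this series (patch 07/12) is available, an
application could run the adapter service on a dedicated service core
roughly as below. The adapter id and lcore id are illustrative.

	#include <errno.h>
	#include <rte_service.h>
	#include <rte_event_dma_adapter.h>

	static int
	run_dma_adapter_service(uint8_t adapter_id, uint32_t service_lcore)
	{
		uint32_t service_id;
		int ret;

		ret = rte_event_dma_adapter_service_id_get(adapter_id, &service_id);
		if (ret)
			return ret;

		/* Dedicate a service lcore and map the adapter service to it. */
		ret = rte_service_lcore_add(service_lcore);
		if (ret && ret != -EALREADY)
			return ret;

		ret = rte_service_map_lcore_set(service_id, service_lcore, 1);
		if (ret)
			return ret;

		/* Mark the service runnable and start the service core. */
		ret = rte_service_runstate_set(service_id, 1);
		if (ret)
			return ret;

		return rte_service_lcore_start(service_lcore);
	}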

diff --git a/lib/eventdev/rte_event_dma_adapter.c b/lib/eventdev/rte_event_dma_adapter.c
index dd58188bf3..8349b95796 100644
--- a/lib/eventdev/rte_event_dma_adapter.c
+++ b/lib/eventdev/rte_event_dma_adapter.c
@@ -2,6 +2,8 @@
  * Copyright (c) 2023 Marvell.
  */
 
+#include <rte_service_component.h>
+
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
 #include "rte_event_dma_adapter.h"
@@ -69,6 +71,10 @@ struct dma_device_info {
 
 	/* Number of vchans configured for a DMA device. */
 	uint16_t num_dma_dev_vchan;
+
+	/* Next vchan to be processed */
+	uint16_t next_vchan_id;
+
 } __rte_cache_aligned;
 
 struct event_dma_adapter {
@@ -90,6 +96,9 @@ struct event_dma_adapter {
 	/* Lock to serialize config updates with service function */
 	rte_spinlock_t lock;
 
+	/* Next dma device to be processed */
+	uint16_t next_dmadev_id;
+
 	/* DMA device structure array */
 	struct dma_device_info *dma_devs;
 
@@ -107,6 +116,26 @@ struct event_dma_adapter {
 
 	/* No. of vchan queue configured */
 	uint16_t nb_vchanq;
+
+	/* Per adapter EAL service ID */
+	uint32_t service_id;
+
+	/* Service initialization state */
+	uint8_t service_initialized;
+
+	/* Max DMA ops processed in any service function invocation */
+	uint32_t max_nb;
+
+	/* Store event port's implicit release capability */
+	uint8_t implicit_release_disabled;
+
+	/* Flag to indicate backpressure at dma_dev.
+	 * Stop further dequeuing of events from the eventdev.
+	 */
+	bool stop_enq_to_dma_dev;
+
+	/* Loop counter to flush dma ops */
+	uint16_t transmit_loop_count;
 } __rte_cache_aligned;
 
 static struct event_dma_adapter **event_dma_adapter;
@@ -148,6 +177,18 @@ edma_array_init(void)
 	return 0;
 }
 
+static inline bool
+edma_circular_buffer_batch_ready(struct dma_ops_circular_buffer *bufp)
+{
+	return bufp->count >= DMA_BATCH_SIZE;
+}
+
+static inline bool
+edma_circular_buffer_space_for_batch(struct dma_ops_circular_buffer *bufp)
+{
+	return (bufp->size - bufp->count) >= DMA_BATCH_SIZE;
+}
+
 static inline int
 edma_circular_buffer_init(const char *name, struct dma_ops_circular_buffer *buf, uint16_t sz)
 {
@@ -166,6 +207,67 @@ edma_circular_buffer_free(struct dma_ops_circular_buffer *buf)
 	rte_free(buf->op_buffer);
 }
 
+static inline int
+edma_circular_buffer_add(struct dma_ops_circular_buffer *bufp, struct rte_event_dma_adapter_op *op)
+{
+	uint16_t *tail = &bufp->tail;
+
+	bufp->op_buffer[*tail] = op;
+
+	/* circular buffer, go round */
+	*tail = (*tail + 1) % bufp->size;
+	bufp->count++;
+
+	return 0;
+}
+
+static inline int
+edma_circular_buffer_flush_to_dma_dev(struct event_dma_adapter *adapter,
+				      struct dma_ops_circular_buffer *bufp, uint8_t dma_dev_id,
+				      uint16_t vchan, uint16_t *nb_ops_flushed)
+{
+	struct rte_event_dma_adapter_op *op;
+	struct dma_vchan_info *tq;
+	uint16_t *head = &bufp->head;
+	uint16_t *tail = &bufp->tail;
+	uint16_t n;
+	uint16_t i;
+	int ret;
+
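+	/* Flush only the contiguous region starting at head; when the tail
+	 * has wrapped around, the remainder is flushed on the next call.
+	 */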
+	if (*tail > *head)
+		n = *tail - *head;
+	else if (*tail < *head)
+		n = bufp->size - *head;
+	else {
+		*nb_ops_flushed = 0;
+		return 0; /* buffer empty */
+	}
+
+	tq = &adapter->dma_devs[dma_dev_id].tqmap[vchan];
+
+	for (i = 0; i < n; i++) {
+		op = bufp->op_buffer[*head];
+		ret = rte_dma_copy_sg(dma_dev_id, vchan, op->src_seg, op->dst_seg,
+				      op->nb_src, op->nb_dst, op->flags);
+		if (ret < 0)
+			break;
+
+		/* Enqueue in transaction queue. */
+		edma_circular_buffer_add(&tq->dma_buf, op);
+
+		*head = (*head + 1) % bufp->size;
+	}
+
+	*nb_ops_flushed = i;
+	bufp->count -= *nb_ops_flushed;
+	if (!bufp->count) {
+		*head = 0;
+		*tail = 0;
+	}
+
+	return *nb_ops_flushed == n ? 0 : -1;
+}
+
 static int
 edma_default_config_cb(uint8_t id, uint8_t evdev_id, struct rte_event_dma_adapter_conf *conf,
 		       void *arg)
@@ -360,6 +462,406 @@ rte_event_dma_adapter_free(uint8_t id)
 	return 0;
 }
 
+static inline unsigned int
+edma_enq_to_dma_dev(struct event_dma_adapter *adapter, struct rte_event *ev, unsigned int cnt)
+{
+	struct dma_vchan_info *vchan_qinfo = NULL;
+	struct rte_event_dma_adapter_op *dma_op;
+	uint16_t vchan, nb_enqueued = 0;
+	int16_t dma_dev_id;
+	unsigned int i, n;
+	int ret;
+
+	ret = 0;
+	n = 0;
+
+	for (i = 0; i < cnt; i++) {
+		dma_op = ev[i].event_ptr;
+		if (dma_op == NULL)
+			continue;
+
+		/* Expected to have response info appended to dma_op. */
+
+		dma_dev_id = dma_op->dma_dev_id;
+		vchan = dma_op->vchan;
+		vchan_qinfo = &adapter->dma_devs[dma_dev_id].vchanq[vchan];
+		if (!vchan_qinfo->vq_enabled) {
+			if (dma_op->op_mp != NULL)
+				rte_mempool_put(dma_op->op_mp, dma_op);
+			continue;
+		}
+		edma_circular_buffer_add(&vchan_qinfo->dma_buf, dma_op);
+
+		if (edma_circular_buffer_batch_ready(&vchan_qinfo->dma_buf)) {
+			ret = edma_circular_buffer_flush_to_dma_dev(adapter, &vchan_qinfo->dma_buf,
+								    dma_dev_id, vchan,
+								    &nb_enqueued);
+			n += nb_enqueued;
+
+			/* If some dma ops failed to flush to the dma_dev and
+			 * space for another batch is not available, stop
+			 * dequeuing from the eventdev momentarily.
+			 */
+			if (unlikely(ret < 0 &&
+				     !edma_circular_buffer_space_for_batch(&vchan_qinfo->dma_buf)))
+				adapter->stop_enq_to_dma_dev = true;
+		}
+	}
+
+	return n;
+}
+
+static unsigned int
+edma_adapter_dev_flush(struct event_dma_adapter *adapter, int16_t dma_dev_id,
+		       uint16_t *nb_ops_flushed)
+{
+	struct dma_vchan_info *vchan_info;
+	struct dma_device_info *dev_info;
+	uint16_t nb = 0, nb_enqueued = 0;
+	uint16_t vchan, nb_vchans;
+
+	dev_info = &adapter->dma_devs[dma_dev_id];
+	nb_vchans = dev_info->num_vchanq;
+
+	for (vchan = 0; vchan < nb_vchans; vchan++) {
+
+		vchan_info = &dev_info->vchanq[vchan];
+		if (unlikely(vchan_info == NULL || !vchan_info->vq_enabled))
+			continue;
+
+		edma_circular_buffer_flush_to_dma_dev(adapter, &vchan_info->dma_buf, dma_dev_id,
+						      vchan, &nb_enqueued);
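+		/* Ops still buffered after the flush indicate remaining
+		 * backpressure; report the pending count to the caller.
+		 */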
+		*nb_ops_flushed += vchan_info->dma_buf.count;
+		nb += nb_enqueued;
+	}
+
+	return nb;
+}
+
+static unsigned int
+edma_adapter_enq_flush(struct event_dma_adapter *adapter)
+{
+	int16_t dma_dev_id;
+	uint16_t nb_enqueued = 0;
+	uint16_t nb_ops_flushed = 0;
+	uint16_t num_dma_dev = rte_dma_count_avail();
+
+	for (dma_dev_id = 0; dma_dev_id < num_dma_dev; dma_dev_id++)
+		nb_enqueued += edma_adapter_dev_flush(adapter, dma_dev_id, &nb_ops_flushed);
+	/* Enable dequeue from the eventdev if all ops from the circular
+	 * buffers were flushed to the dma_dev.
+	 */
+	if (!nb_ops_flushed)
+		adapter->stop_enq_to_dma_dev = false;
+
+	return nb_enqueued;
+}
+
+/* Flush an instance's enqueue buffers every DMA_ENQ_FLUSH_THRESHOLD
+ * iterations of edma_adapter_enq_run()
+ */
+#define DMA_ENQ_FLUSH_THRESHOLD 1024
+
+static int
+edma_adapter_enq_run(struct event_dma_adapter *adapter, unsigned int max_enq)
+{
+	uint8_t event_port_id = adapter->event_port_id;
+	uint8_t event_dev_id = adapter->eventdev_id;
+	struct rte_event ev[DMA_BATCH_SIZE];
+	unsigned int nb_enq, nb_enqueued;
+	uint16_t n;
+
+	if (adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)
+		return 0;
+
+	nb_enqueued = 0;
+	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
+
+		if (unlikely(adapter->stop_enq_to_dma_dev)) {
+			nb_enqueued += edma_adapter_enq_flush(adapter);
+
+			if (unlikely(adapter->stop_enq_to_dma_dev))
+				break;
+		}
+
+		n = rte_event_dequeue_burst(event_dev_id, event_port_id, ev, DMA_BATCH_SIZE, 0);
+
+		if (!n)
+			break;
+
+		nb_enqueued += edma_enq_to_dma_dev(adapter, ev, n);
+	}
+
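+	/* DMA_ENQ_FLUSH_THRESHOLD is a power of two, so the mask below
+	 * triggers a flush once every threshold iterations.
+	 */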
+	if ((++adapter->transmit_loop_count & (DMA_ENQ_FLUSH_THRESHOLD - 1)) == 0)
+		nb_enqueued += edma_adapter_enq_flush(adapter);
+
+	return nb_enqueued;
+}
+
+#define DMA_ADAPTER_MAX_EV_ENQ_RETRIES 100
+
+static inline uint16_t
+edma_ops_enqueue_burst(struct event_dma_adapter *adapter, struct rte_event_dma_adapter_op **ops,
+		       uint16_t num)
+{
+	uint8_t event_port_id = adapter->event_port_id;
+	uint8_t event_dev_id = adapter->eventdev_id;
+	struct rte_event events[DMA_BATCH_SIZE];
+	struct rte_event *response_info;
+	uint16_t nb_enqueued, nb_ev;
+	uint8_t retry;
+	uint8_t i;
+
+	nb_ev = 0;
+	retry = 0;
+	nb_enqueued = 0;
+	num = RTE_MIN(num, DMA_BATCH_SIZE);
+	for (i = 0; i < num; i++) {
+		struct rte_event *ev;
+
+		if (unlikely(ops[i] == NULL))
+			continue;
+
+		ev = &events[nb_ev++];
+
+		/* Response info is expected to be appended to the dma_op. */
+		response_info = (struct rte_event *)((uint8_t *)ops[i] +
+							  sizeof(struct rte_event_dma_adapter_op));
+
+		rte_memcpy(ev, response_info, sizeof(struct rte_event));
+		ev->event_ptr = ops[i];
+		ev->event_type = RTE_EVENT_TYPE_DMADEV;
+		if (adapter->implicit_release_disabled)
+			ev->op = RTE_EVENT_OP_FORWARD;
+		else
+			ev->op = RTE_EVENT_OP_NEW;
+	}
+
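+	/* Retry for a bounded number of attempts if the event device
+	 * backpressures; ops that are still not enqueued remain in the
+	 * caller's buffer.
+	 */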
+	do {
+		nb_enqueued += rte_event_enqueue_burst(event_dev_id, event_port_id,
+						       &events[nb_enqueued], nb_ev - nb_enqueued);
+
+	} while (retry++ < DMA_ADAPTER_MAX_EV_ENQ_RETRIES && nb_enqueued < nb_ev);
+
+	return nb_enqueued;
+}
+
+static int
+edma_circular_buffer_flush_to_evdev(struct event_dma_adapter *adapter,
+				    struct dma_ops_circular_buffer *bufp,
+				    uint16_t *enqueue_count)
+{
+	struct rte_event_dma_adapter_op **ops = bufp->op_buffer;
+	uint16_t n = 0, nb_ops_flushed;
+	uint16_t *head = &bufp->head;
+	uint16_t *tail = &bufp->tail;
+
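+	/* Only the contiguous region starting at head is flushed here; a
+	 * wrapped tail is picked up on the next call.
+	 */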
+	if (*tail > *head)
+		n = *tail - *head;
+	else if (*tail < *head)
+		n = bufp->size - *head;
+	else {
+		if (enqueue_count)
+			*enqueue_count = 0;
+		return 0; /* buffer empty */
+	}
+
+	if (enqueue_count && n > *enqueue_count)
+		n = *enqueue_count;
+
+	nb_ops_flushed = edma_ops_enqueue_burst(adapter, &ops[*head], n);
+	if (enqueue_count)
+		*enqueue_count = nb_ops_flushed;
+
+	bufp->count -= nb_ops_flushed;
+	if (!bufp->count) {
+		*head = 0;
+		*tail = 0;
+		return 0; /* buffer empty */
+	}
+
+	*head = (*head + nb_ops_flushed) % bufp->size;
+	return 1;
+}
+
+static void
+edma_ops_buffer_flush(struct event_dma_adapter *adapter)
+{
+	if (likely(adapter->ebuf.count == 0))
+		return;
+
+	while (edma_circular_buffer_flush_to_evdev(adapter, &adapter->ebuf, NULL))
+		;
+}
+
+static inline unsigned int
+edma_adapter_deq_run(struct event_dma_adapter *adapter, unsigned int max_deq)
+{
+	struct dma_vchan_info *vchan_info;
+	struct dma_ops_circular_buffer *tq_buf;
+	struct rte_event_dma_adapter_op *ops;
+	uint16_t n, nb_deq, nb_enqueued, i;
+	struct dma_device_info *dev_info;
+	uint16_t vchan, num_vchan;
+	uint16_t num_dma_dev;
+	int16_t dma_dev_id;
+	uint16_t index;
+	bool done;
+	bool err;
+
+	nb_deq = 0;
+	edma_ops_buffer_flush(adapter);
+
+	num_dma_dev = rte_dma_count_avail();
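+
+	/* Resume iteration from the last serviced device and vchan so
+	 * dequeue bandwidth is shared fairly across devices and vchans.
+	 */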
+	do {
+		done = true;
+
+		for (dma_dev_id = adapter->next_dmadev_id; dma_dev_id < num_dma_dev; dma_dev_id++) {
+			uint16_t queues = 0;
+			dev_info = &adapter->dma_devs[dma_dev_id];
+			num_vchan = dev_info->num_vchanq;
+
+			for (vchan = dev_info->next_vchan_id; queues < num_vchan;
+			     vchan = (vchan + 1) % num_vchan, queues++) {
+
+				vchan_info = &dev_info->vchanq[vchan];
+				if (unlikely(vchan_info == NULL || !vchan_info->vq_enabled))
+					continue;
+
+				n = rte_dma_completed(dma_dev_id, vchan, DMA_BATCH_SIZE,
+						&index, &err);
+				if (!n)
+					continue;
+
+				done = false;
+
+				tq_buf = &dev_info->tqmap[vchan].dma_buf;
+
+				nb_enqueued = n;
+				if (unlikely(!adapter->ebuf.count))
+					edma_circular_buffer_flush_to_evdev(adapter, tq_buf,
+									    &nb_enqueued);
+
+				if (likely(nb_enqueued == n))
+					goto check;
+
+				/* Failed to enqueue events case */
+				for (i = nb_enqueued; i < n; i++) {
+					ops = tq_buf->op_buffer[tq_buf->head];
+					edma_circular_buffer_add(&adapter->ebuf, ops);
+					tq_buf->head = (tq_buf->head + 1) % tq_buf->size;
+				}
+
+check:
+				nb_deq += n;
+				if (nb_deq >= max_deq) {
+					if ((vchan + 1) == num_vchan)
+						adapter->next_dmadev_id =
+								(dma_dev_id + 1) % num_dma_dev;
+
+					dev_info->next_vchan_id = (vchan + 1) % num_vchan;
+
+					return nb_deq;
+				}
+			}
+		}
+		adapter->next_dmadev_id = 0;
+
+	} while (done == false);
+
+	return nb_deq;
+}
+
+static int
+edma_adapter_run(struct event_dma_adapter *adapter, unsigned int max_ops)
+{
+	unsigned int ops_left = max_ops;
+
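+	/* Interleave dequeue (completions to the eventdev) and enqueue
+	 * (new ops to the dma_dev) until the budget is spent or no
+	 * progress is made.
+	 */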
+	while (ops_left > 0) {
+		unsigned int e_cnt, d_cnt;
+
+		e_cnt = edma_adapter_deq_run(adapter, ops_left);
+		ops_left -= RTE_MIN(ops_left, e_cnt);
+
+		d_cnt = edma_adapter_enq_run(adapter, ops_left);
+		ops_left -= RTE_MIN(ops_left, d_cnt);
+
+		if (e_cnt == 0 && d_cnt == 0)
+			break;
+	}
+
+	if (ops_left == max_ops) {
+		rte_event_maintain(adapter->eventdev_id, adapter->event_port_id, 0);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static int
+edma_service_func(void *args)
+{
+	struct event_dma_adapter *adapter = args;
+	int ret;
+
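+	/* Back off if a configuration update holds the lock; the service
+	 * will simply run again on its next invocation.
+	 */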
+	if (rte_spinlock_trylock(&adapter->lock) == 0)
+		return 0;
+	ret = edma_adapter_run(adapter, adapter->max_nb);
+	rte_spinlock_unlock(&adapter->lock);
+
+	return ret;
+}
+
+static int
+edma_init_service(struct event_dma_adapter *adapter, uint8_t id)
+{
+	struct rte_event_dma_adapter_conf adapter_conf;
+	struct rte_service_spec service;
+	uint32_t impl_rel;
+	int ret;
+
+	if (adapter->service_initialized)
+		return 0;
+
+	memset(&service, 0, sizeof(service));
+	snprintf(service.name, DMA_ADAPTER_NAME_LEN, "rte_event_dma_adapter_%d", id);
+	service.socket_id = adapter->socket_id;
+	service.callback = edma_service_func;
+	service.callback_userdata = adapter;
+
+	/* Service function handles locking for queue add/del updates */
+	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
+	ret = rte_service_component_register(&service, &adapter->service_id);
+	if (ret) {
+		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32, service.name, ret);
+		return ret;
+	}
+
+	ret = adapter->conf_cb(id, adapter->eventdev_id, &adapter_conf, adapter->conf_arg);
+	if (ret) {
+		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32, ret);
+		return ret;
+	}
+
+	adapter->max_nb = adapter_conf.max_nb;
+	adapter->event_port_id = adapter_conf.event_port_id;
+
+	if (rte_event_port_attr_get(adapter->eventdev_id, adapter->event_port_id,
+				    RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE, &impl_rel)) {
+		RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32,
+				 adapter->eventdev_id);
+		edma_circular_buffer_free(&adapter->ebuf);
+		rte_free(adapter);
+		return -EINVAL;
+	}
+
+	adapter->implicit_release_disabled = (uint8_t)impl_rel;
+	adapter->service_initialized = 1;
+
+	return ret;
+}
+
 static void
 edma_update_vchanq_info(struct event_dma_adapter *adapter, struct dma_device_info *dev_info,
 			uint16_t vchan, uint8_t add)
@@ -391,6 +893,60 @@ edma_update_vchanq_info(struct event_dma_adapter *adapter, struct dma_device_inf
 	}
 }
 
+static int
+edma_add_vchan(struct event_dma_adapter *adapter, int16_t dma_dev_id, uint16_t vchan)
+{
+	struct dma_device_info *dev_info = &adapter->dma_devs[dma_dev_id];
+	struct dma_vchan_info *vchanq;
+	struct dma_vchan_info *tqmap;
+	uint16_t nb_vchans;
+	uint32_t i;
+
+	if (dev_info->vchanq == NULL) {
+		nb_vchans = dev_info->num_dma_dev_vchan;
+
+		dev_info->vchanq = rte_zmalloc_socket(adapter->mem_name,
+				nb_vchans * sizeof(struct dma_vchan_info),
+				0, adapter->socket_id);
+		if (dev_info->vchanq == NULL)
+			return -ENOMEM;
+
+		dev_info->tqmap = rte_zmalloc_socket(adapter->mem_name,
+				nb_vchans * sizeof(struct dma_vchan_info),
+				0, adapter->socket_id);
+		if (dev_info->tqmap == NULL)
+			return -ENOMEM;
+
+		for (i = 0; i < nb_vchans; i++) {
+			vchanq = &dev_info->vchanq[i];
+
+			if (edma_circular_buffer_init("dma_dev_circular_buffer", &vchanq->dma_buf,
+						DMA_ADAPTER_OPS_BUFFER_SIZE)) {
+				RTE_EDEV_LOG_ERR("Failed to get memory for dma_dev buffer");
+				rte_free(vchanq);
+				return -ENOMEM;
+			}
+
+			tqmap = &dev_info->tqmap[i];
+			if (edma_circular_buffer_init("dma_dev_circular_trans_buf", &tqmap->dma_buf,
+						DMA_ADAPTER_OPS_BUFFER_SIZE)) {
+				RTE_EDEV_LOG_ERR(
+					"Failed to get memory for dma_dev transaction buffer");
+				rte_free(tqmap);
+				return -ENOMEM;
+			}
+		}
+	}
+
+	if (vchan == RTE_DMA_ALL_VCHAN) {
+		for (i = 0; i < dev_info->num_dma_dev_vchan; i++)
+			edma_update_vchanq_info(adapter, dev_info, i, 1);
+	} else
+		edma_update_vchanq_info(adapter, dev_info, vchan, 1);
+
+	return 0;
+}
+
 int
 rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan,
 				const struct rte_event *event)
@@ -470,6 +1026,38 @@ rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan,
 			edma_update_vchanq_info(adapter, &adapter->dma_devs[dma_dev_id], vchan, 1);
 	}
 
+	/* In case the HW cap is RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW, or a SW adapter is
+	 * used, initiate the service so the application can choose whichever way it wants to use
+	 * the adapter.
+	 *
+	 * Case 1: RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW. The application may want to use
+	 * one of the two modes below:
+	 *
+	 * a. OP_FORWARD mode -> HW dequeue + SW enqueue
+	 * b. OP_NEW mode -> HW dequeue
+	 *
+	 * Case 2: No HW caps, use the SW adapter:
+	 *
+	 * a. OP_FORWARD mode -> SW enqueue & dequeue
+	 * b. OP_NEW mode -> SW dequeue
+	 */
+	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) ||
+	    (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
+	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))) {
+		rte_spinlock_lock(&adapter->lock);
+		ret = edma_init_service(adapter, id);
+		if (ret == 0)
+			ret = edma_add_vchan(adapter, dma_dev_id, vchan);
+		rte_spinlock_unlock(&adapter->lock);
+
+		if (ret)
+			return ret;
+
+		rte_service_component_runstate_set(adapter->service_id, 1);
+	}
+
 	return 0;
 }
 
@@ -533,6 +1121,7 @@ rte_event_dma_adapter_vchan_del(uint8_t id, int16_t dma_dev_id, uint16_t vchan)
 		}
 
 		rte_spinlock_unlock(&adapter->lock);
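+		/* Keep the service running only while vchan queues remain. */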
+		rte_service_component_runstate_set(adapter->service_id, adapter->nb_vchanq);
 	}
 
 	return ret;
-- 
2.25.1

