From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id C8D2742659;
	Thu, 28 Sep 2023 18:50:44 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id A218840A7D;
	Thu, 28 Sep 2023 18:50:39 +0200 (CEST)
Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com
 [67.231.148.174])
 by mails.dpdk.org (Postfix) with ESMTP id 3C382402DD
 for <dev@dpdk.org>; Thu, 28 Sep 2023 18:50:38 +0200 (CEST)
Received: from pps.filterd (m0045849.ppops.net [127.0.0.1])
 by mx0a-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id
 38SAfcA7003479; Thu, 28 Sep 2023 09:50:37 -0700
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;
 h=from : to : cc :
 subject : date : message-id : in-reply-to : references : mime-version :
 content-transfer-encoding : content-type; s=pfpt0220;
 bh=/mgE0ZxQDwnYyZ4ZfHx8aUbGHKBzMIh6NfgO0u8XwOE=;
 b=DZOqi/opVzI+N6/JIBSAtCzcDBTPnjBovmy/sils2R4fEF5E/n5f+H+fd/DTmQz+ufgJ
 nuh7+H+n94HmMOLTPnA0351TpDyAqJHo2mtUG31K/mIY63KTgORsY29+n2lHiDTvGjm2
 fd2TL9RMbK4qqE9b3UqSHwceRCT9YFs+wzpUIGsEIDvI9Nac63A5OOGS30nzgNtO1NO5
 NcdA83xEvyrK9GcXrQnHKkHX32h/YM4XIp1v/5CdPF0IxxOeIfGfyUlAaZtPOErulNK8
 3MFvGR8IuPMnrKmDPKWI4e1I2uHFsYLaa2Wz4d8ddPkwtiiQ53OYr0jNqX6kv0BsxTGb eQ== 
Received: from dc5-exch01.marvell.com ([199.233.59.181])
 by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3td7y6sd9m-1
 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);
 Thu, 28 Sep 2023 09:50:37 -0700
Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com
 (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;
 Thu, 28 Sep 2023 09:50:35 -0700
Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com
 (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend
 Transport; Thu, 28 Sep 2023 09:50:35 -0700
Received: from localhost.localdomain (unknown [10.28.36.157])
 by maili.marvell.com (Postfix) with ESMTP id 691FB5C68F4;
 Thu, 28 Sep 2023 09:50:31 -0700 (PDT)
From: Amit Prakash Shukla <amitprakashs@marvell.com>
To: Amit Prakash Shukla <amitprakashs@marvell.com>, Jerin Jacob
 <jerinj@marvell.com>
CC: <dev@dpdk.org>, <fengchengwen@huawei.com>, <kevin.laatz@intel.com>,
 <bruce.richardson@intel.com>, <conor.walsh@intel.com>,
 <vattunuru@marvell.com>, <g.singh@nxp.com>,
 <sachin.saxena@oss.nxp.com>, <hemant.agrawal@nxp.com>,
 <cheng1.jiang@intel.com>, <ndabilpuram@marvell.com>,
 <anoobj@marvell.com>, <mb@smartsharesystems.com>
Subject: [PATCH v6 04/12] eventdev/dma: support vchan add and delete
Date: Thu, 28 Sep 2023 22:19:50 +0530
Message-ID: <20230928164959.340575-5-amitprakashs@marvell.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230928164959.340575-1-amitprakashs@marvell.com>
References: <20230928103623.216287-1-amitprakashs@marvell.com>
 <20230928164959.340575-1-amitprakashs@marvell.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain
X-Proofpoint-ORIG-GUID: Bz52Ev5rTPlZ2xcLezcjedxPaFhNWPfy
X-Proofpoint-GUID: Bz52Ev5rTPlZ2xcLezcjedxPaFhNWPfy
X-Proofpoint-Virus-Version: vendor=baseguard
 engine=ICAP:2.0.267,Aquarius:18.0.980,Hydra:6.0.619,FMLib:17.11.176.26
 definitions=2023-09-28_16,2023-09-28_03,2023-05-22_02
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org

Added API support to add and delete vchans from the DMA adapter. A DMA
device ID and vchan are added to the adapter instance by calling
rte_event_dma_adapter_vchan_add() and deleted using
rte_event_dma_adapter_vchan_del().
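
A minimal usage sketch follows (adapter_id and ev_queue_id are
illustrative and assumed to come from earlier adapter and eventdev
setup; dmadev 0 / vchan 0 are example IDs):

	struct rte_event ev = {
		.queue_id = ev_queue_id,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};
	int ret;

	/* Bind vchan 0 of dmadev 0 to the adapter. The event is
	 * required when the PMD reports the
	 * RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND
	 * capability.
	 */
	ret = rte_event_dma_adapter_vchan_add(adapter_id, 0, 0, &ev);
	if (ret < 0)
		return ret;

	/* ... enqueue DMA ops through the adapter ... */

	ret = rte_event_dma_adapter_vchan_del(adapter_id, 0, 0);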

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
 lib/eventdev/rte_event_dma_adapter.c | 207 +++++++++++++++++++++++++++
 1 file changed, 207 insertions(+)
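
Whether the event argument may be NULL depends on the PMD
capabilities; a minimal caps-check sketch (evdev_id and dma_dev_id are
assumed from earlier setup):

	uint32_t cap;
	bool ev_required = false;

	if (rte_event_dma_adapter_caps_get(evdev_id, dma_dev_id, &cap) == 0 &&
	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))
		ev_required = true;	/* event must be non-NULL in vchan_add */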

diff --git a/lib/eventdev/rte_event_dma_adapter.c b/lib/eventdev/rte_event_dma_adapter.c
index e57d8407cb..ec81281bf8 100644
--- a/lib/eventdev/rte_event_dma_adapter.c
+++ b/lib/eventdev/rte_event_dma_adapter.c
@@ -42,8 +42,31 @@ struct dma_ops_circular_buffer {
 	struct rte_event_dma_adapter_op **op_buffer;
 } __rte_cache_aligned;
 
+/* Vchan information */
+struct dma_vchan_info {
+	/* Set to indicate vchan queue is enabled */
+	bool vq_enabled;
+
+	/* Circular buffer for batching DMA ops to dma_dev */
+	struct dma_ops_circular_buffer dma_buf;
+} __rte_cache_aligned;
+
 /* DMA device information */
 struct dma_device_info {
+	/* Pointer to vchan queue info */
+	struct dma_vchan_info *vchanq;
+
+	/* Pointer to a second vchan queue map that holds
+	 * the ops passed by the application until their
+	 * DMA completion is done.
+	 */
+	struct dma_vchan_info *tqmap;
+
+	/* If num_vchanq > 0, the start callback is
+	 * invoked if not already invoked.
+	 */
+	uint16_t num_vchanq;
+
 	/* Number of vchans configured for a DMA device. */
 	uint16_t num_dma_dev_vchan;
 } __rte_cache_aligned;
@@ -81,6 +104,9 @@ struct event_dma_adapter {
 
 	/* Set if  default_cb is being used */
 	int default_cb_arg;
+
+	/* Number of vchan queues configured */
+	uint16_t nb_vchanq;
 } __rte_cache_aligned;
 
 static struct event_dma_adapter **event_dma_adapter;
@@ -333,3 +359,184 @@ rte_event_dma_adapter_free(uint8_t id)
 
 	return 0;
 }
+
+static void
+edma_update_vchanq_info(struct event_dma_adapter *adapter, struct dma_device_info *dev_info,
+			uint16_t vchan, uint8_t add)
+{
+	struct dma_vchan_info *vchan_info;
+	struct dma_vchan_info *tqmap_info;
+	int enabled;
+	uint16_t i;
+
+	if (dev_info->vchanq == NULL)
+		return;
+
+	if (vchan == RTE_DMA_ALL_VCHAN) {
+		for (i = 0; i < dev_info->num_dma_dev_vchan; i++)
+			edma_update_vchanq_info(adapter, dev_info, i, add);
+	} else {
+		tqmap_info = &dev_info->tqmap[vchan];
+		vchan_info = &dev_info->vchanq[vchan];
+		enabled = vchan_info->vq_enabled;
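+		/* Update the counters only on an enable-state transition. */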
+		if (add) {
+			adapter->nb_vchanq += !enabled;
+			dev_info->num_vchanq += !enabled;
+		} else {
+			adapter->nb_vchanq -= enabled;
+			dev_info->num_vchanq -= enabled;
+		}
+		vchan_info->vq_enabled = !!add;
+		tqmap_info->vq_enabled = !!add;
+	}
+}
+
+int
+rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan,
+				const struct rte_event *event)
+{
+	struct event_dma_adapter *adapter;
+	struct dma_device_info *dev_info;
+	struct rte_eventdev *dev;
+	uint32_t cap;
+	int ret;
+
+	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+	if (!rte_dma_is_valid(dma_dev_id)) {
+		RTE_EDEV_LOG_ERR("Invalid dma_dev_id = %" PRIu8, dma_dev_id);
+		return -EINVAL;
+	}
+
+	adapter = edma_id_to_adapter(id);
+	if (adapter == NULL)
+		return -EINVAL;
+
+	dev = &rte_eventdevs[adapter->eventdev_id];
+	ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, dma_dev_id, &cap);
+	if (ret) {
+		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %u dma_dev %u", id, dma_dev_id);
+		return ret;
+	}
+
+	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) && (event == NULL)) {
+		RTE_EDEV_LOG_ERR("Event can not be NULL for dma_dev_id = %u", dma_dev_id);
+		return -EINVAL;
+	}
+
+	dev_info = &adapter->dma_devs[dma_dev_id];
+	if (vchan != RTE_DMA_ALL_VCHAN && vchan >= dev_info->num_dma_dev_vchan) {
+		RTE_EDEV_LOG_ERR("Invalid vhcan %u", vchan);
+		return -EINVAL;
+	}
+
+	/* If the HW capability is RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
+	 * no service core is needed as the HW supports event forwarding.
+	 */
+	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND &&
+	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW) ||
+	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)) {
+		if (*dev->dev_ops->dma_adapter_vchan_add == NULL)
+			return -ENOTSUP;
+		if (dev_info->vchanq == NULL) {
+			dev_info->vchanq = rte_zmalloc_socket(adapter->mem_name,
+							dev_info->num_dma_dev_vchan *
+							sizeof(struct dma_vchan_info),
+							0, adapter->socket_id);
+			if (dev_info->vchanq == NULL) {
+				printf("Queue pair add not supported\n");
+				return -ENOMEM;
+			}
+		}
+
+		if (dev_info->tqmap == NULL) {
+			dev_info->tqmap = rte_zmalloc_socket(adapter->mem_name,
+						dev_info->num_dma_dev_vchan *
+						sizeof(struct dma_vchan_info),
+						0, adapter->socket_id);
+			if (dev_info->tqmap == NULL) {
+				printf("tq pair add not supported\n");
+				return -ENOMEM;
+			}
+		}
+
+		ret = (*dev->dev_ops->dma_adapter_vchan_add)(dev, dma_dev_id, vchan, event);
+		if (ret)
+			return ret;
+
+		/* Mark the vchan as bound to the adapter. */
+		edma_update_vchanq_info(adapter, dev_info, vchan, 1);
+	}
+
+	return 0;
+}
+
+int
+rte_event_dma_adapter_vchan_del(uint8_t id, int16_t dma_dev_id, uint16_t vchan)
+{
+	struct event_dma_adapter *adapter;
+	struct dma_device_info *dev_info;
+	struct rte_eventdev *dev;
+	uint32_t cap;
+	int ret;
+
+	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+	if (!rte_dma_is_valid(dma_dev_id)) {
+		RTE_EDEV_LOG_ERR("Invalid dma_dev_id = %" PRIu8, dma_dev_id);
+		return -EINVAL;
+	}
+
+	adapter = edma_id_to_adapter(id);
+	if (adapter == NULL)
+		return -EINVAL;
+
+	dev = &rte_eventdevs[adapter->eventdev_id];
+	ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, dma_dev_id, &cap);
+	if (ret)
+		return ret;
+
+	dev_info = &adapter->dma_devs[dma_dev_id];
+
+	if (vchan != RTE_DMA_ALL_VCHAN && vchan >= dev_info->num_dma_dev_vchan) {
+		RTE_EDEV_LOG_ERR("Invalid vhcan %" PRIu16, vchan);
+		return -EINVAL;
+	}
+
+	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)) {
+		if (*dev->dev_ops->dma_adapter_vchan_del == NULL)
+			return -ENOTSUP;
+		ret = (*dev->dev_ops->dma_adapter_vchan_del)(dev, dma_dev_id, vchan);
+		if (ret == 0) {
+			edma_update_vchanq_info(adapter, dev_info, vchan, 0);
+			if (dev_info->num_vchanq == 0) {
+				rte_free(dev_info->vchanq);
+				rte_free(dev_info->tqmap);
+				dev_info->vchanq = NULL;
+				dev_info->tqmap = NULL;
+			}
+		}
+	} else {
+		if (adapter->nb_vchanq == 0)
+			return 0;
+
+		rte_spinlock_lock(&adapter->lock);
+		edma_update_vchanq_info(adapter, dev_info, vchan, 0);
+
+		if (dev_info->num_vchanq == 0) {
+			rte_free(dev_info->vchanq);
+			rte_free(dev_info->tqmap);
+			dev_info->vchanq = NULL;
+			dev_info->tqmap = NULL;
+		}
+
+		rte_spinlock_unlock(&adapter->lock);
+	}
+
+	return ret;
+}
-- 
2.25.1