From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id A2201A0487
	for <public@inbox.dpdk.org>; Fri,  5 Jul 2019 10:38:43 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id A350D1BDFB;
	Fri,  5 Jul 2019 10:38:16 +0200 (CEST)
Received: from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com
 [67.231.156.173]) by dpdk.org (Postfix) with ESMTP id 1E0621BDEB
 for <dev@dpdk.org>; Fri,  5 Jul 2019 10:38:13 +0200 (CEST)
Received: from pps.filterd (m0045851.ppops.net [127.0.0.1])
 by mx0b-0016f401.pphosted.com (8.16.0.27/8.16.0.27) with SMTP id
 x658Z4F5032454; Fri, 5 Jul 2019 01:38:12 -0700
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;
 h=from : to : cc :
 subject : date : message-id : in-reply-to : references : mime-version :
 content-transfer-encoding : content-type; s=pfpt0818;
 bh=cOCzvGBpABbNwvwiZ2qHfxYp2/dW7Iz7jURLNtHD7Oo=;
 b=iGCX9UoYUQJMyeKgcd4p4YFG907XJoJDKMdCdH2XPowG/FfeOGEx+OcldKc7P7yjNXsN
 OQT+AL/Db4aqZdgtE9ZY+Azn+OalDDuqJYE7sM7lfra6P4XBr4RXnoLwS74tMWF3lcir
 GT/M3YswJxdbz8CwPL9lU9e4YVVh5wj5yPN90QAfxfb/5GQMaKOW8Go41YpIrDQ9OeXA
 ldqJRBEoaSHsbH71j5i7DGWGaFZCfSBCybBr7HJRKhLUiIhVe364KPQLpU6JQ+WXEvzB
 OJ6yW+u9DHCiqhmYtG9jecZ+XiLCPQMRZmkxAq74NdzeQP6CUlzKyGm4ZQsW8qzely7z Dw== 
Received: from sc-exch04.marvell.com ([199.233.58.184])
 by mx0b-0016f401.pphosted.com with ESMTP id 2thjyrb27k-1
 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);
 Fri, 05 Jul 2019 01:38:12 -0700
Received: from SC-EXCH01.marvell.com (10.93.176.81) by SC-EXCH04.marvell.com
 (10.93.176.84) with Microsoft SMTP Server (TLS) id 15.0.1367.3; Fri, 5 Jul
 2019 01:38:10 -0700
Received: from maili.marvell.com (10.93.176.43) by SC-EXCH01.marvell.com
 (10.93.176.81) with Microsoft SMTP Server id 15.0.1367.3 via Frontend
 Transport; Fri, 5 Jul 2019 01:38:10 -0700
Received: from jerin-lab.marvell.com (jerin-lab.marvell.com [10.28.34.14])
 by maili.marvell.com (Postfix) with ESMTP id EB2603F703F;
 Fri,  5 Jul 2019 01:38:08 -0700 (PDT)
From: <jerinj@marvell.com>
To: <dev@dpdk.org>, Satha Rao <skoteshwar@marvell.com>, Vamsi Attunuru
 <vattunuru@marvell.com>, John McNamara <john.mcnamara@intel.com>, "Marko
 Kovacevic" <marko.kovacevic@intel.com>
CC: <thomas@monjalon.net>
Date: Fri, 5 Jul 2019 14:08:02 +0530
Message-ID: <20190705083805.29519-6-jerinj@marvell.com>
X-Mailer: git-send-email 2.22.0
In-Reply-To: <20190705083805.29519-1-jerinj@marvell.com>
References: <20190601182030.8282-1-jerinj@marvell.com>
 <20190705083805.29519-1-jerinj@marvell.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain
X-Proofpoint-Virus-Version: vendor=fsecure engine=2.50.10434:, ,
 definitions=2019-07-05_03:, , signatures=0
Subject: [dpdk-dev] [PATCH v2 5/8] raw/octeontx2_dma: add enqueue operation
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

From: Satha Rao <skoteshwar@marvell.com>

Add the rawdev enqueue operation: DMA command words are written into the
instruction queue's chunk buffers and the command is submitted to the
hardware by ringing the VF doorbell register (DPI_VDMA_DBELL).
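
As a minimal usage sketch (illustrative only, not part of this patch),
an application drives the new operation through the generic rawdev API.
The sketch assumes the struct dpi_dma_buf_ptr_s and
struct dpi_dma_queue_ctx_s definitions from the driver header (include
path assumed) and a device already configured as described in
doc/guides/rawdevs/octeontx2_dma.rst:

    #include <rte_rawdev.h>
    #include "otx2_dpi_rawdev.h" /* driver-private types; path assumed */

    /* Hypothetical helper: enqueue one DMA transfer descriptor. */
    static int
    submit_dma_xfer(uint16_t dev_id, struct dpi_dma_buf_ptr_s *xfer,
                    struct dpi_dma_queue_ctx_s *ctx)
    {
            struct rte_rawdev_buf buf = { .buf_addr = xfer };
            struct rte_rawdev_buf *bufs[1] = { &buf };

            /* Returns the number of buffers enqueued: 1 on success. */
            return rte_rawdev_enqueue_buffers(dev_id, bufs, 1,
                                              (rte_rawdev_obj_t)ctx);
    }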

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
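Note (kept below the "---" so it stays out of the commit log): once a
buffer is enqueued, completion can be observed by polling the completion
word, which this patch initializes to DPI_REQ_CDATA before submission.
The value the hardware writes back is not shown in this patch, so the
sketch below only assumes it differs from that initial marker and that
cdata is volatile-qualified in the driver header:

    #include <rte_pause.h>

    /* Hypothetical busy-wait for the transfer submitted above. */
    static void
    wait_dma_done(struct dpi_dma_buf_ptr_s *xfer)
    {
            /* cdata was set to DPI_REQ_CDATA on enqueue and is assumed
             * to be overwritten by hardware on completion.
             */
            while (xfer->comp_ptr->cdata == DPI_REQ_CDATA)
                    rte_pause();
    }
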
 doc/guides/rawdevs/octeontx2_dma.rst        |   6 +
 drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c | 166 ++++++++++++++++++++
 2 files changed, 172 insertions(+)

diff --git a/doc/guides/rawdevs/octeontx2_dma.rst b/doc/guides/rawdevs/octeontx2_dma.rst
index 5e926d10d..748f4dbd4 100644
--- a/doc/guides/rawdevs/octeontx2_dma.rst
+++ b/doc/guides/rawdevs/octeontx2_dma.rst
@@ -82,3 +82,9 @@ The following code shows how the device is configured
    rte_mempool_populate_default(conf.chunk_pool);
 
    rte_rawdev_configure(dev_id, (rte_rawdev_obj_t)&rdev_info);
+
+Performing Data Transfer
+------------------------
+
+To perform data transfer using OCTEON TX2 DMA rawdev devices, use the standard
+``rte_rawdev_enqueue_buffers()`` and ``rte_rawdev_dequeue_buffers()`` APIs.
diff --git a/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c b/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c
index f9c330d69..a1d679f83 100644
--- a/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c
+++ b/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c
@@ -69,6 +69,171 @@ dma_queue_finish(struct dpi_vf_s *dpivf)
 	return DPI_DMA_QUEUE_SUCCESS;
 }
 
+/* Write a command of 1 to 64 words to the command queue */
+static __rte_always_inline enum dpi_dma_queue_result_e
+dma_queue_write(struct dpi_vf_s *dpi, uint16_t cmd_count, uint64_t *cmds)
+{
+	if ((cmd_count < 1) || (cmd_count > 64))
+		return DPI_DMA_QUEUE_INVALID_PARAM;
+
+	if (cmds == NULL)
+		return DPI_DMA_QUEUE_INVALID_PARAM;
+
+	/* Room available in the current buffer for the command */
+	if (dpi->index + cmd_count < dpi->pool_size_m1) {
+		uint64_t *ptr = dpi->base_ptr;
+
+		ptr += dpi->index;
+		dpi->index += cmd_count;
+		while (cmd_count--)
+			*ptr++ = *cmds++;
+	} else {
+		void *new_buffer;
+		uint64_t *ptr;
+		int count;
+
+		/* Allocate a new command buffer, return if it fails */
+		if (rte_mempool_get(dpi->chunk_pool, &new_buffer) ||
+		    new_buffer == NULL) {
+			return DPI_DMA_QUEUE_NO_MEMORY;
+		}
+		ptr = dpi->base_ptr;
+		/* Figure out how many command words will fit in this buffer.
+		 * One location will be needed for the next buffer pointer.
+		 */
+		count = dpi->pool_size_m1 - dpi->index;
+		ptr += dpi->index;
+		cmd_count -= count;
+		while (count--)
+			*ptr++ = *cmds++;
+		/* The chunk next pointer is 2 DWORDs; the second is reserved. */
+		*ptr++ = (uint64_t)new_buffer;
+		*ptr   = 0;
+		/* The current buffer is full and has a link to the next buffer.
+		 * Time to write the rest of the commands into the new buffer.
+		 */
+		dpi->base_ptr = new_buffer;
+		dpi->index = cmd_count;
+		ptr = new_buffer;
+		while (cmd_count--)
+			*ptr++ = *cmds++;
+		/* The queue index may be greater than or equal to pool size */
+		if (dpi->index >= dpi->pool_size_m1) {
+			if (rte_mempool_get(dpi->chunk_pool, &new_buffer) ||
+			    new_buffer == NULL) {
+				return DPI_DMA_QUEUE_NO_MEMORY;
+			}
+			/* Write next buffer address */
+			*ptr = (uint64_t)new_buffer;
+			dpi->base_ptr = new_buffer;
+			dpi->index = 0;
+		}
+	}
+	return DPI_DMA_QUEUE_SUCCESS;
+}
+
+/* Submit a DMA command to the DMA queue and ring the doorbell */
+static __rte_always_inline int
+dma_queue_submit(struct rte_rawdev *dev, uint16_t cmd_count, uint64_t *cmds)
+{
+	struct dpi_vf_s *dpivf = dev->dev_private;
+	enum dpi_dma_queue_result_e result;
+
+	result = dma_queue_write(dpivf, cmd_count, cmds);
+	rte_wmb(); /* Commands must be visible before the doorbell write */
+	if (likely(result == DPI_DMA_QUEUE_SUCCESS))
+		otx2_write64((uint64_t)cmd_count,
+			     dpivf->vf_bar0 + DPI_VDMA_DBELL);
+
+	return result;
+}
+
+/* Enqueue buffers to the DMA queue.
+ * Returns the number of buffers enqueued successfully.
+ */
+static int
+otx2_dpi_rawdev_enqueue_bufs(struct rte_rawdev *dev,
+			     struct rte_rawdev_buf **buffers,
+			     unsigned int count, rte_rawdev_obj_t context)
+{
+	struct dpi_dma_queue_ctx_s *ctx = (struct dpi_dma_queue_ctx_s *)context;
+	struct dpi_dma_buf_ptr_s *cmd;
+	uint32_t c = 0;
+
+	for (c = 0; c < count; c++) {
+		uint64_t dpi_cmd[DPI_DMA_CMD_SIZE] = {0};
+		union dpi_dma_instr_hdr_u *hdr;
+		uint16_t index = 0, i;
+
+		hdr = (union dpi_dma_instr_hdr_u *)&dpi_cmd[0];
+		cmd = (struct dpi_dma_buf_ptr_s *)buffers[c]->buf_addr;
+
+		hdr->s.xtype = ctx->xtype & DPI_XTYPE_MASK;
+		hdr->s.pt = ctx->pt & DPI_HDR_PT_MASK;
+		/* Bail out if the request uses zero-byte write completion
+		 * but no completion pointer was provided
+		 */
+		if ((hdr->s.pt == DPI_HDR_PT_ZBW_CA ||
+		     hdr->s.pt == DPI_HDR_PT_ZBW_NC) && cmd->comp_ptr == NULL)
+			return c;
+
+		cmd->comp_ptr->cdata = DPI_REQ_CDATA; /* Mark as pending */
+		hdr->s.ptr = (uint64_t)cmd->comp_ptr;
+		hdr->s.deallocv = ctx->deallocv;
+		hdr->s.tt = ctx->tt & DPI_W0_TT_MASK;
+		hdr->s.grp = ctx->grp & DPI_W0_GRP_MASK;
+
+		/* Queue the completion address for later polling, but only
+		 * if the caller provided completion ring details.
+		 */
+		if (ctx->c_ring) {
+			ctx->c_ring->compl_data[ctx->c_ring->tail] =
+								 cmd->comp_ptr;
+			STRM_INC(ctx->c_ring);
+		}
+
+		if (hdr->s.deallocv)
+			hdr->s.pvfe = 1;
+
+		if (hdr->s.pt == DPI_HDR_PT_WQP)
+			hdr->s.ptr = hdr->s.ptr | DPI_HDR_PT_WQP_STATUSNC;
+
+		index += 4; /* The instruction header fills the first 4 words */
+		hdr->s.fport = 0;
+		hdr->s.lport = 0;
+
+		/* For the inbound case, source pointers are the last pointers.
+		 * For all other cases, source pointers are the first pointers.
+		 */
+		if (ctx->xtype == DPI_XTYPE_INBOUND) {
+			hdr->s.nfst = cmd->wptr_cnt & DPI_MAX_POINTER;
+			hdr->s.nlst = cmd->rptr_cnt & DPI_MAX_POINTER;
+			for (i = 0; i < hdr->s.nfst; i++) {
+				dpi_cmd[index++] = cmd->wptr[i]->u[0];
+				dpi_cmd[index++] = cmd->wptr[i]->u[1];
+			}
+			for (i = 0; i < hdr->s.nlst; i++) {
+				dpi_cmd[index++] = cmd->rptr[i]->u[0];
+				dpi_cmd[index++] = cmd->rptr[i]->u[1];
+			}
+		} else {
+			hdr->s.nfst = cmd->rptr_cnt & DPI_MAX_POINTER;
+			hdr->s.nlst = cmd->wptr_cnt & DPI_MAX_POINTER;
+			for (i = 0; i < hdr->s.nfst; i++) {
+				dpi_cmd[index++] = cmd->rptr[i]->u[0];
+				dpi_cmd[index++] = cmd->rptr[i]->u[1];
+			}
+			for (i = 0; i < hdr->s.nlst; i++) {
+				dpi_cmd[index++] = cmd->wptr[i]->u[0];
+				dpi_cmd[index++] = cmd->wptr[i]->u[1];
+			}
+		}
+		if (dma_queue_submit(dev, index, dpi_cmd))
+			return c;
+	}
+	return c;
+}
+
 static int
 otx2_dpi_rawdev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
 {
@@ -108,6 +273,7 @@ otx2_dpi_rawdev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
 
 static const struct rte_rawdev_ops dpi_rawdev_ops = {
 	.dev_configure = otx2_dpi_rawdev_configure,
+	.enqueue_bufs = otx2_dpi_rawdev_enqueue_bufs,
 };
 
 static int
-- 
2.22.0