From: <jerinj@marvell.com>
To: <dev@dpdk.org>, Satha Rao <skoteshwar@marvell.com>,
Vamsi Attunuru <vattunuru@marvell.com>,
John McNamara <john.mcnamara@intel.com>,
"Marko Kovacevic" <marko.kovacevic@intel.com>
Cc: <thomas@monjalon.net>
Subject: [dpdk-dev] [PATCH v2 5/8] raw/octeontx2_dma: add enqueue operation
Date: Fri, 5 Jul 2019 14:08:02 +0530 [thread overview]
Message-ID: <20190705083805.29519-6-jerinj@marvell.com> (raw)
In-Reply-To: <20190705083805.29519-1-jerinj@marvell.com>
From: Satha Rao <skoteshwar@marvell.com>
Add the rawdev enqueue operation. It converts the caller's DMA buffer
descriptors into DPI instruction words, writes them to the device command
queue and rings the doorbell for each submitted burst.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
doc/guides/rawdevs/octeontx2_dma.rst | 6 +
drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c | 166 ++++++++++++++++++++
2 files changed, 172 insertions(+)
diff --git a/doc/guides/rawdevs/octeontx2_dma.rst b/doc/guides/rawdevs/octeontx2_dma.rst
index 5e926d10d..748f4dbd4 100644
--- a/doc/guides/rawdevs/octeontx2_dma.rst
+++ b/doc/guides/rawdevs/octeontx2_dma.rst
@@ -82,3 +82,9 @@ The following code shows how the device is configured
rte_mempool_populate_default(conf.chunk_pool);
rte_rawdev_configure(dev_id, (rte_rawdev_obj_t)&rdev_info);
+
+Performing Data Transfer
+------------------------
+
+To perform data transfer using OCTEON TX2 DMA rawdev devices, use the standard
+``rte_rawdev_enqueue_buffers()`` and ``rte_rawdev_dequeue_buffers()`` APIs.
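
As a rough illustration of the call flow (not part of the patch), an
application fills one driver-specific descriptor per transfer and pushes a
burst of them through the generic rawdev API. The sketch below only assumes
the descriptor (dpi_dma_buf_ptr_s) and queue context (dpi_dma_queue_ctx_s)
definitions exposed by the driver's otx2_dpi_rawdev.h; the helper name is
illustrative, and both driver structures are treated as opaque here.

    #include <rte_rawdev.h>

    static int
    dma_xfer_burst(uint16_t dev_id, struct rte_rawdev_buf **bufs,
                   unsigned int n, rte_rawdev_obj_t queue_ctx)
    {
            int done;

            /* Each bufs[i]->buf_addr points to a filled-in
             * struct dpi_dma_buf_ptr_s (source/destination pointer lists,
             * their counts and an optional completion pointer); queue_ctx
             * is a struct dpi_dma_queue_ctx_s selecting transfer type,
             * pointer type, group, etc.
             */
            done = rte_rawdev_enqueue_buffers(dev_id, bufs, n, queue_ctx);
            if (done < 0)
                    return done;    /* device-level error */

            /* The driver returns how many descriptors it consumed; anything
             * short of 'n' means the remaining descriptors were not queued
             * and are still owned by the caller.
             */
            return done;
    }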
diff --git a/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c b/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c
index f9c330d69..a1d679f83 100644
--- a/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c
+++ b/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c
@@ -69,6 +69,171 @@ dma_queue_finish(struct dpi_vf_s *dpivf)
return DPI_DMA_QUEUE_SUCCESS;
}
+/* Write an arbitrary number of command words to a command queue */
+static __rte_always_inline enum dpi_dma_queue_result_e
+dma_queue_write(struct dpi_vf_s *dpi, uint16_t cmd_count, uint64_t *cmds)
+{
+	if ((cmd_count < 1) || (cmd_count > 64))
+		return DPI_DMA_QUEUE_INVALID_PARAM;
+
+	if (cmds == NULL)
+		return DPI_DMA_QUEUE_INVALID_PARAM;
+
+	/* Room available in the current buffer for the command */
+	if (dpi->index + cmd_count < dpi->pool_size_m1) {
+		uint64_t *ptr = dpi->base_ptr;
+
+		ptr += dpi->index;
+		dpi->index += cmd_count;
+		while (cmd_count--)
+			*ptr++ = *cmds++;
+	} else {
+		void *new_buffer;
+		uint64_t *ptr;
+		int count;
+
+		/* Allocate a new command buffer, return if it fails */
+		if (rte_mempool_get(dpi->chunk_pool, &new_buffer) ||
+		    new_buffer == NULL) {
+			return DPI_DMA_QUEUE_NO_MEMORY;
+		}
+		ptr = dpi->base_ptr;
+		/* Figure out how many command words will fit in this buffer.
+		 * One location will be needed for the next buffer pointer.
+		 */
+		count = dpi->pool_size_m1 - dpi->index;
+		ptr += dpi->index;
+		cmd_count -= count;
+		while (count--)
+			*ptr++ = *cmds++;
+		/* The chunk next pointer is 2 DWORDs; the second DWORD is
+		 * reserved.
+		 */
+		*ptr++ = (uint64_t)new_buffer;
+		*ptr = 0;
+		/* The current buffer is full and has a link to the next
+		 * buffer. Time to write the rest of the commands into the
+		 * new buffer.
+		 */
+		dpi->base_ptr = new_buffer;
+		dpi->index = cmd_count;
+		ptr = new_buffer;
+		while (cmd_count--)
+			*ptr++ = *cmds++;
+		/* The queue index may be greater than the pool size */
+		if (dpi->index >= dpi->pool_size_m1) {
+			if (rte_mempool_get(dpi->chunk_pool, &new_buffer) ||
+			    new_buffer == NULL) {
+				return DPI_DMA_QUEUE_NO_MEMORY;
+			}
+			/* Write the next buffer address */
+			*ptr = (uint64_t)new_buffer;
+			dpi->base_ptr = new_buffer;
+			dpi->index = 0;
+		}
+	}
+	return DPI_DMA_QUEUE_SUCCESS;
+}
+
+/* Submit a DMA command to the DMA queues. */
+static __rte_always_inline int
+dma_queue_submit(struct rte_rawdev *dev, uint16_t cmd_count, uint64_t *cmds)
+{
+	struct dpi_vf_s *dpivf = dev->dev_private;
+	enum dpi_dma_queue_result_e result;
+
+	result = dma_queue_write(dpivf, cmd_count, cmds);
+	rte_wmb();
+	if (likely(result == DPI_DMA_QUEUE_SUCCESS))
+		otx2_write64((uint64_t)cmd_count,
+			     dpivf->vf_bar0 + DPI_VDMA_DBELL);
+
+	return result;
+}
+
+/* Enqueue buffers to the DMA queue.
+ * Returns the number of buffers successfully enqueued.
+ */
+static int
+otx2_dpi_rawdev_enqueue_bufs(struct rte_rawdev *dev,
+			     struct rte_rawdev_buf **buffers,
+			     unsigned int count, rte_rawdev_obj_t context)
+{
+	struct dpi_dma_queue_ctx_s *ctx = (struct dpi_dma_queue_ctx_s *)context;
+	struct dpi_dma_buf_ptr_s *cmd;
+	uint32_t c = 0;
+
+	for (c = 0; c < count; c++) {
+		uint64_t dpi_cmd[DPI_DMA_CMD_SIZE] = {0};
+		union dpi_dma_instr_hdr_u *hdr;
+		uint16_t index = 0, i;
+
+		hdr = (union dpi_dma_instr_hdr_u *)&dpi_cmd[0];
+		cmd = (struct dpi_dma_buf_ptr_s *)buffers[c]->buf_addr;
+
+		hdr->s.xtype = ctx->xtype & DPI_XTYPE_MASK;
+		hdr->s.pt = ctx->pt & DPI_HDR_PT_MASK;
+		/* Request initiated with byte write completion, but the
+		 * completion pointer was not provided
+		 */
+		if ((hdr->s.pt == DPI_HDR_PT_ZBW_CA ||
+		     hdr->s.pt == DPI_HDR_PT_ZBW_NC) && cmd->comp_ptr == NULL)
+			return c;
+
+		cmd->comp_ptr->cdata = DPI_REQ_CDATA;
+		hdr->s.ptr = (uint64_t)cmd->comp_ptr;
+		hdr->s.deallocv = ctx->deallocv;
+		hdr->s.tt = ctx->tt & DPI_W0_TT_MASK;
+		hdr->s.grp = ctx->grp & DPI_W0_GRP_MASK;
+
+		/* Queue the completion address for later polling only if the
+		 * caller provides completion ring details.
+		 */
+		if (ctx->c_ring) {
+			ctx->c_ring->compl_data[ctx->c_ring->tail] =
+							cmd->comp_ptr;
+			STRM_INC(ctx->c_ring);
+		}
+
+		if (hdr->s.deallocv)
+			hdr->s.pvfe = 1;
+
+		if (hdr->s.pt == DPI_HDR_PT_WQP)
+			hdr->s.ptr = hdr->s.ptr | DPI_HDR_PT_WQP_STATUSNC;
+
+		index += 4;
+		hdr->s.fport = 0;
+		hdr->s.lport = 0;
+
+		/* For the inbound case, src pointers are the last pointers.
+		 * For all other cases, src pointers are the first pointers.
+		 */
+		if (ctx->xtype == DPI_XTYPE_INBOUND) {
+			hdr->s.nfst = cmd->wptr_cnt & DPI_MAX_POINTER;
+			hdr->s.nlst = cmd->rptr_cnt & DPI_MAX_POINTER;
+			for (i = 0; i < hdr->s.nfst; i++) {
+				dpi_cmd[index++] = cmd->wptr[i]->u[0];
+				dpi_cmd[index++] = cmd->wptr[i]->u[1];
+			}
+			for (i = 0; i < hdr->s.nlst; i++) {
+				dpi_cmd[index++] = cmd->rptr[i]->u[0];
+				dpi_cmd[index++] = cmd->rptr[i]->u[1];
+			}
+		} else {
+			hdr->s.nfst = cmd->rptr_cnt & DPI_MAX_POINTER;
+			hdr->s.nlst = cmd->wptr_cnt & DPI_MAX_POINTER;
+			for (i = 0; i < hdr->s.nfst; i++) {
+				dpi_cmd[index++] = cmd->rptr[i]->u[0];
+				dpi_cmd[index++] = cmd->rptr[i]->u[1];
+			}
+			for (i = 0; i < hdr->s.nlst; i++) {
+				dpi_cmd[index++] = cmd->wptr[i]->u[0];
+				dpi_cmd[index++] = cmd->wptr[i]->u[1];
+			}
+		}
+		if (dma_queue_submit(dev, index, dpi_cmd))
+			return c;
+	}
+	return c;
+}
+
static int
otx2_dpi_rawdev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
{
@@ -108,6 +273,7 @@ otx2_dpi_rawdev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
static const struct rte_rawdev_ops dpi_rawdev_ops = {
	.dev_configure = otx2_dpi_rawdev_configure,
+	.enqueue_bufs = otx2_dpi_rawdev_enqueue_bufs,
};
static int
--
2.22.0
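
For readers new to the DPI command-queue handling, the comment block below
restates what dma_queue_write() and dma_queue_submit() above are doing. The
chunk layout is inferred from the code in this patch, not quoted from a
hardware manual, so treat the exact word positions as an approximation.

    /* Illustrative only -- inferred from dma_queue_write()/dma_queue_submit().
     *
     * One mempool chunk of the command queue:
     *
     *   words 0 .. pool_size_m1-1 : DMA instruction words; dpi->index is
     *                               the next free slot in the chunk
     *   word  pool_size_m1        : address of the next chunk, written
     *                               once the current chunk fills up
     *   word  pool_size_m1 + 1    : reserved, written as zero
     *
     * dma_queue_submit() issues rte_wmb() so the instruction words are
     * globally visible before the doorbell, then writes the number of
     * 64-bit command words just queued to DPI_VDMA_DBELL to start the
     * transfer. Completion is reported through the per-request completion
     * pointer (comp_ptr), which is pre-set to DPI_REQ_CDATA here and
     * polled by the dequeue path added in the next patch of this series.
     */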