From: Shally Verma <shally.verma@caviumnetworks.com>
To: pablo.de.lara.guarch@intel.com
Cc: dev@dpdk.org, pathreya@caviumnetworks.com,
mchalla@caviumnetworks.com,
Ashish Gupta <Ashish.Gupta@caviumnetworks.com>,
Ashish Gupta <ashish.gupta@caviumnetworks.com>,
Sunila Sahu <sunila.sahu@caviumnetworks.com>
Subject: [dpdk-dev] [PATCH v2 4/6] compress/octeontx: add ops enq deq apis
Date: Mon, 2 Jul 2018 22:24:35 +0530
Message-ID: <1530550477-22444-5-git-send-email-shally.verma@caviumnetworks.com>
In-Reply-To: <1530550477-22444-1-git-send-email-shally.verma@caviumnetworks.com>
From: Ashish Gupta <Ashish.Gupta@caviumnetworks.com>
Implement the enqueue and dequeue burst APIs for the octeontx ZIP PMD. Ops
are processed synchronously: each stateless op is submitted to the ZIP
engine and polled for completion during enqueue, then placed on the queue
pair's completion ring, from which the dequeue burst function drains it.
Signed-off-by: Ashish Gupta <ashish.gupta@caviumnetworks.com>
Signed-off-by: Shally Verma <shally.verma@caviumnetworks.com>
Signed-off-by: Sunila Sahu <sunila.sahu@caviumnetworks.com>
---
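Note for reviewers: a minimal sketch of how an application would exercise
these datapath hooks through the generic compressdev API. dev_id, qp_id,
ops[] and the helper name are illustrative placeholders, not part of this
patch:

    #include <rte_compressdev.h>

    /* Hypothetical helper; the device and queue pair are assumed to be
     * configured, and ops come from an op pool with private_xform set.
     */
    static uint16_t
    compress_burst_sync(uint8_t dev_id, uint16_t qp_id,
                        struct rte_comp_op **ops, uint16_t nb_ops)
    {
            uint16_t enqd, deqd = 0;

            /* Lands in zip_pmd_enqueue_burst_sync(); each stateless op
             * is processed to completion before it is put on the ring.
             */
            enqd = rte_compressdev_enqueue_burst(dev_id, qp_id,
                                                 ops, nb_ops);

            /* Drains qp->processed_pkts via zip_pmd_dequeue_burst_sync() */
            while (deqd < enqd)
                    deqd += rte_compressdev_dequeue_burst(dev_id, qp_id,
                                                          &ops[deqd],
                                                          enqd - deqd);
            return deqd;
    }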
drivers/compress/octeontx/zip_pmd.c | 114 ++++++++++++++++++++++++
drivers/compress/octeontx/zipvf.c | 49 +++++++++++
drivers/compress/octeontx/zipvf.h | 169 ++++++++++++++++++++++++++++++++++++
3 files changed, 332 insertions(+)
diff --git a/drivers/compress/octeontx/zip_pmd.c b/drivers/compress/octeontx/zip_pmd.c
index c8feb96bc..4a0eea41a 100644
--- a/drivers/compress/octeontx/zip_pmd.c
+++ b/drivers/compress/octeontx/zip_pmd.c
@@ -25,6 +25,67 @@ static const struct rte_compressdev_capabilities
RTE_COMP_END_OF_CAPABILITIES_LIST()
};
+/*
+ * Reset the stream to its default state for the next set of stateless
+ * operations
+ */
+static inline void
+reset_stream(struct zip_stream *z_stream)
+{
+ union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
+
+ inst->s.bf = 1;
+ inst->s.ef = 0;
+}
+
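+/*
+ * Process a stateless op synchronously: submit the instruction to the
+ * ZIP engine, poll for completion, and translate the hardware completion
+ * code into an rte_comp op status.
+ */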
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm)
+{
+ int ret;
+ union zip_inst_s *inst = zstrm->inst;
+ volatile union zip_zres_s *zresult = NULL;
+
+ zipvf_prepare_cmd_stateless(op, zstrm);
+
+ zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
+ zresult->s.compcode = 0;
+
+#ifdef ZIP_DBG
+ zip_dump_instruction(inst);
+#endif
+
+ /* Submit zip command */
+ ret = zipvf_push_command(qp, (void *)inst);
+
+ /* Busy-wait for the result in sync mode; compcode is written by
+ * the engine on completion
+ */
+ do {
+ } while (!zresult->s.compcode);
+
+ if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ } else {
+ /* Fatal error, cannot recover */
+ ZIP_PMD_ERR("operation failed with error code: %d\n",
+ zresult->s.compcode);
+ if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ else
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ }
+
+ ZIP_PMD_INFO("ret %d, written %d\n", ret, zresult->s.totalbyteswritten);
+
+ op->produced = zresult->s.totalbyteswritten;
+ op->consumed = zresult->s.totalbytesread;
+
+ if (zresult->s.ef == 1)
+ reset_stream(zstrm);
+
+ zresult->s.compcode = 0;
+ return ret;
+}
+
/** Parse xform parameters and setup a stream */
int
zip_set_stream_parameters(struct rte_compressdev *dev,
@@ -111,6 +172,7 @@ zip_set_stream_parameters(struct rte_compressdev *dev,
inst->s.res_ptr_ctl.s.length = 0;
z_stream->inst = inst;
+ z_stream->func = zip_process_op;
return 0;
@@ -385,6 +447,56 @@ zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
return 0;
}
+static uint16_t
+zip_pmd_enqueue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+ struct rte_comp_op *op;
+ struct zip_stream *zstrm;
+ int ret, i;
+ uint16_t enqd = 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ op = ops[i];
+ if (op->op_type == RTE_COMP_OP_STATEFUL) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ } else {
+ /* Process stateless ops */
+ zstrm = (struct zip_stream *)op->private_xform;
+ ret = zstrm->func(op, qp, zstrm);
+ }
+
+ /* Put the op, whatever its status, on the completion queue so
+ * the application can dequeue it
+ */
+ ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
+ if (unlikely(ret < 0)) {
+ /* increment error count if op failed to enqueue */
+ qp->qp_stats.enqueue_err_count++;
+ } else {
+ qp->qp_stats.enqueued_count++;
+ enqd++;
+ }
+ }
+ return enqd;
+}
+
+static uint16_t
+zip_pmd_dequeue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
struct rte_compressdev_ops octtx_zip_pmd_ops = {
.dev_configure = zip_pmd_config,
.dev_start = zip_pmd_start,
@@ -446,6 +558,8 @@ zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
compressdev->dev_ops = &octtx_zip_pmd_ops;
/* register rx/tx burst functions for data path */
+ compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
+ compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
return ret;
}
diff --git a/drivers/compress/octeontx/zipvf.c b/drivers/compress/octeontx/zipvf.c
index 2a74e8bbb..996ae69ab 100644
--- a/drivers/compress/octeontx/zipvf.c
+++ b/drivers/compress/octeontx/zipvf.c
@@ -91,6 +91,55 @@ zipvf_q_term(struct zipvf_qp *qp)
}
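+/*
+ * Copy one ZIP instruction into the VF command queue and ring the
+ * doorbell. When the last slot is reached, a next-chunk pointer links
+ * back to the queue base, making the queue circular in software.
+ */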
+int
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *cmd)
+{
+ zip_quex_doorbell_t dbell;
+ union zip_nptr_s ncp;
+ uint64_t *ncb_ptr;
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ void *reg_base = qp->vf->vbar0;
+
+ /* Take the queue lock */
+ rte_spinlock_lock(&(cmdq->qlock));
+
+ /* Check space availability in zip cmd queue */
+ if ((((cmdq->sw_head - (uint64_t *)cmdq->va) * sizeof(uint64_t *)) +
+ ZIP_CMD_SIZE) == (ZIP_MAX_CMDQ_SIZE - 8)) {
+ /* Last slot of the command queue */
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ /* Advance the pointer in units of 64-bit words */
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+
+ /* now, point the "Next-Chunk Buffer Ptr" to sw_head */
+ ncb_ptr = cmdq->sw_head;
+ /* Point the head back to the command queue base */
+ cmdq->sw_head = (uint64_t *)cmdq->va;
+
+ ncp.u = 0ull;
+ ncp.s.addr = cmdq->iova;
+ *ncb_ptr = ncp.u;
+ } else {
+ /* Enough space available in the command queue */
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+ }
+
+ rte_wmb();
+
+ /* Ring the ZIP VF doorbell */
+ dbell.u = 0ull;
+ dbell.s.dbell_cnt = 1;
+ zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u);
+
+ rte_spinlock_unlock(&(cmdq->qlock));
+ return 0;
+}
+
int
zipvf_create(struct rte_compressdev *compressdev)
{
diff --git a/drivers/compress/octeontx/zipvf.h b/drivers/compress/octeontx/zipvf.h
index 038877e4e..ee39b8eb3 100644
--- a/drivers/compress/octeontx/zipvf.h
+++ b/drivers/compress/octeontx/zipvf.h
@@ -156,6 +156,170 @@ struct zip_vf {
/* pointer to pools */
} __rte_cache_aligned;
+
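+/*
+ * Set up the instruction's input pointer: a direct data pointer for a
+ * single-segment mbuf, or a 16-byte aligned gather list for
+ * multi-segment mbufs.
+ */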
+static inline int
+zipvf_prepare_in_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset, inlen;
+ union zip_zptr_s *sg_list = NULL;
+ struct rte_mbuf *m_src;
+ union zip_inst_s *inst = zstrm->inst;
+ rte_iova_t iova;
+
+ inlen = op->src.length;
+ offset = op->src.offset;
+ m_src = op->m_src;
+
+ if (m_src->nb_segs == 1) {
+ /* Prepare direct input data pointer */
+ inst->s.dg = 0;
+ inst->s.inp_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_src, offset);
+ inst->s.inp_ptr_ctl.s.length = inlen;
+ return 0;
+ }
+
+ ZIP_PMD_INFO("Input packet is segmented\n");
+
+ /* Packet is segmented, create gather buffer */
+ inst->s.dg = 1;
+ iova = rte_mempool_virt2iova(zstrm->bufs[IN_DATA_BUF]);
+ if (iova & 0xF) {
+ /* Align it to 16 Byte address */
+ iova = ZIP_ALIGN_ROUNDUP(iova, ZIP_SGPTR_ALIGN);
+ }
+
+ inst->s.inp_ptr_addr.s.addr = iova;
+ inst->s.inp_ptr_ctl.s.length = (m_src->nb_segs < MAX_SG_LEN) ?
+ (m_src->nb_segs) : MAX_SG_LEN;
+
+ sg_list = (union zip_zptr_s *)(zstrm->bufs[IN_DATA_BUF]);
+
+ int i = 0;
+ rte_iova_t addr;
+ uint16_t len;
+
+ while (i < inst->s.inp_ptr_ctl.s.length) {
+ addr = rte_pktmbuf_iova_offset(m_src, offset);
+ /* Usable bytes in this segment start at offset */
+ len = rte_pktmbuf_data_len(m_src) - offset;
+ if (len > inlen)
+ len = inlen;
+ sg_list[i].s.addr = addr;
+ sg_list[i].s.length = len;
+ i++;
+ inlen -= len;
+ m_src = m_src->next;
+ offset = 0;
+ }
+ return 0;
+}
+
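+/*
+ * Set up the instruction's output pointer: a direct data pointer for a
+ * single-segment mbuf, or a scatter list for multi-segment mbufs;
+ * totaloutputlength tells the engine how much room is available.
+ */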
+static inline int
+zipvf_prepare_out_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset;
+ union zip_zptr_s *sg_list = NULL;
+ struct rte_mbuf *m_dst;
+ union zip_inst_s *inst = zstrm->inst;
+ rte_iova_t iova;
+
+ offset = op->dst.offset;
+ m_dst = op->m_dst;
+
+ if (m_dst->nb_segs == 1) {
+ /* Prepare direct output data pointer */
+ inst->s.ds = 0;
+ inst->s.out_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_dst, offset);
+ inst->s.totaloutputlength = rte_pktmbuf_data_len(m_dst) -
+ op->dst.offset;
+ inst->s.out_ptr_ctl.s.length = inst->s.totaloutputlength;
+ return 0;
+ }
+
+ ZIP_PMD_INFO("Output packet is segmented\n");
+
+ /* Packet is segmented, create scatter buffer */
+ inst->s.ds = 1;
+ iova = rte_mempool_virt2iova(zstrm->bufs[OUT_DATA_BUF]);
+ if (iova & 0xF) {
+ /* Align it to 16 Byte address */
+ iova = ZIP_ALIGN_ROUNDUP(iova, ZIP_SGPTR_ALIGN);
+ }
+
+ inst->s.out_ptr_addr.s.addr = iova;
+ inst->s.out_ptr_ctl.s.length = (m_dst->nb_segs < MAX_SG_LEN) ?
+ (m_dst->nb_segs) : MAX_SG_LEN;
+
+ sg_list = (union zip_zptr_s *)(zstrm->bufs[OUT_DATA_BUF]);
+
+ int i = 0;
+
+ /* Accumulate total output length across the scatter segments */
+ inst->s.totaloutputlength = 0;
+
+ while (i < inst->s.out_ptr_ctl.s.length) {
+ sg_list[i].s.addr = rte_pktmbuf_iova_offset(m_dst, offset);
+ /* Usable bytes in this segment start at offset */
+ sg_list[i].s.length = rte_pktmbuf_data_len(m_dst) - offset;
+ inst->s.totaloutputlength += sg_list[i].s.length;
+ m_dst = m_dst->next;
+ offset = 0;
+ i++;
+ }
+
+ return 0;
+}
+
+static inline int
+zipvf_prepare_cmd_stateless(struct rte_comp_op *op, struct zip_stream *zstrm)
+{
+ union zip_inst_s *inst = zstrm->inst;
+
+ /* Stateless op: always set the end-of-frame flush flag */
+ inst->s.ef = 1;
+
+ if (inst->s.op == ZIP_OP_E_DECOMP)
+ inst->s.sf = 1;
+ else
+ inst->s.sf = 0;
+
+ /* Set input checksum */
+ inst->s.adlercrc32 = op->input_chksum;
+
+ /* Prepare input and output buffers (gather/scatter if segmented) */
+ zipvf_prepare_in_buf(zstrm, op);
+ zipvf_prepare_out_buf(zstrm, op);
+
+ return 0;
+}
+
+#ifdef ZIP_DBG
+static inline void
+zip_dump_instruction(void *inst)
+{
+ union zip_inst_s *cmd83 = (union zip_inst_s *)inst;
+ printf("####### START ########\n");
+ printf("doneint:%d totaloutputlength:%d\n", cmd83->s.doneint,
+ cmd83->s.totaloutputlength);
+ printf("exnum:%d iv:%d exbits:%d hmif:%d halg:%d\n", cmd83->s.exn,
+ cmd83->s.iv, cmd83->s.exbits, cmd83->s.hmif, cmd83->s.halg);
+ printf("flush:%d speed:%d cc:%d\n", cmd83->s.sf,
+ cmd83->s.ss, cmd83->s.cc);
+ printf("eof:%d bof:%d op:%d dscatter:%d dgather:%d hgather:%d\n",
+ cmd83->s.ef, cmd83->s.bf, cmd83->s.op, cmd83->s.ds,
+ cmd83->s.dg, cmd83->s.hg);
+ printf("historylength:%d adler32:%d\n", cmd83->s.historylength,
+ cmd83->s.adlercrc32);
+ printf("ctx_ptr.addr:0x%lx\n", cmd83->s.ctx_ptr_addr.s.addr);
+ printf("ctx_ptr.len:%d\n", cmd83->s.ctx_ptr_ctl.s.length);
+ printf("history_ptr.addr:0x%lx\n", cmd83->s.his_ptr_addr.s.addr);
+ printf("history_ptr.len:%d\n", cmd83->s.his_ptr_ctl.s.length);
+ printf("inp_ptr.addr:0x%lx\n", cmd83->s.inp_ptr_addr.s.addr);
+ printf("inp_ptr.len:%d\n", cmd83->s.inp_ptr_ctl.s.length);
+ printf("out_ptr.addr:0x%lx\n", cmd83->s.out_ptr_addr.s.addr);
+ printf("out_ptr.len:%d\n", cmd83->s.out_ptr_ctl.s.length);
+ printf("result_ptr.len:%d\n", cmd83->s.res_ptr_ctl.s.length);
+ printf("####### END ########\n");
+}
+#endif
+
int
zipvf_create(struct rte_compressdev *compressdev);
@@ -176,6 +340,11 @@ zip_set_stream_parameters(struct rte_compressdev *dev,
const struct rte_comp_xform *xform,
struct zip_stream *z_stream);
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm);
+
uint64_t
zip_reg_read64(uint8_t *hw_addr, uint64_t offset);
--
2.14.3