From: Shally Verma <shally.verma@caviumnetworks.com>
To: pablo.de.lara.guarch@intel.com
Cc: fiona.trahe@intel.com, dev@dpdk.org, pathreya@caviumnetworks.com,
	mchalla@caviumnetworks.com,
	Ashish Gupta <ashish.gupta@caviumnetworks.com>,
	Sunila Sahu <sunila.sahu@caviumnetworks.com>
Subject: [dpdk-dev] [PATCH v1 4/7] compress/octeontx: add ops enq deq apis
Date: Tue,  5 Jun 2018 16:05:10 +0530	[thread overview]
Message-ID: <1528194913-25893-5-git-send-email-shally.verma@caviumnetworks.com> (raw)
In-Reply-To: <1528194913-25893-1-git-send-email-shally.verma@caviumnetworks.com>

Implement the burst enqueue and dequeue APIs. Enqueue processes each
stateless op synchronously on the ZIP engine and pushes the completed
op, with its status, onto the queue pair's completion ring; dequeue
drains that ring.
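
For context, a minimal usage sketch (assuming an op pool created with
rte_comp_op_pool_create() and a private xform set up beforehand; the
names op_pool, priv_xform, dev_id and qp_id are illustrative, not part
of this patch):

	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);
	uint16_t enqd, deqd;

	op->op_type = RTE_COMP_OP_STATELESS;
	op->private_xform = priv_xform;
	/* the caller also sets op->m_src/m_dst and src/dst offsets */

	enqd = rte_compressdev_enqueue_burst(dev_id, qp_id, &op, 1);
	do {
		deqd = rte_compressdev_dequeue_burst(dev_id, qp_id, &op, 1);
	} while (deqd < enqd);
	/* op->status, op->produced and op->consumed are now valid */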

Signed-off-by: Ashish Gupta <ashish.gupta@caviumnetworks.com>
Signed-off-by: Shally Verma <shally.verma@caviumnetworks.com>
Signed-off-by: Sunila Sahu <sunila.sahu@caviumnetworks.com>
---
 drivers/compress/octeontx/zip_pmd.c | 111 ++++++++++++++++++++++++
 drivers/compress/octeontx/zipvf.c   |  49 +++++++++++
 drivers/compress/octeontx/zipvf.h   | 164 ++++++++++++++++++++++++++++++++++++
 3 files changed, 324 insertions(+)

diff --git a/drivers/compress/octeontx/zip_pmd.c b/drivers/compress/octeontx/zip_pmd.c
index 349114626..9e629fd17 100644
--- a/drivers/compress/octeontx/zip_pmd.c
+++ b/drivers/compress/octeontx/zip_pmd.c
@@ -30,6 +30,67 @@ static const struct rte_compressdev_capabilities
 	RTE_COMP_END_OF_CAPABILITIES_LIST()
 };
 
+/*
+ * Reset stream to default state for the next set of stateless operations
+ */
+static inline void reset_stream(struct zip_stream *z_stream)
+{
+	union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
+
+	inst->s.bf = 1;
+	inst->s.ef = 0;
+}
+
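+/*
+ * Process one op synchronously: submit the prepared instruction to the
+ * hardware queue and poll the result buffer until a completion code
+ * appears.
+ */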
+int
+zip_process_op(struct rte_comp_op *op,
+		struct zipvf_qp *qp,
+		struct zip_stream *zstrm)
+{
+	int ret;
+	union zip_inst_s *inst = zstrm->inst;
+	volatile union zip_zres_s *zresult = NULL;
+
+	if (op->op_type == RTE_COMP_OP_STATELESS)
+		zipvf_prepare_cmd_stateless(op, zstrm);
+
+	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
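+	/* Clear the completion code; hardware sets it non-zero when done */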
+	zresult->s.compcode = 0;
+
+#ifdef ZIP_DBG
+	zip_dump_instruction(inst);
+#endif
+
+	/* Submit zip command */
+	ret = zipvf_push_command(qp, (void *)inst);
+
+	/* Wait for the result in sync mode: the engine writes a non-zero
+	 * completion code into the result buffer when the op finishes
+	 */
+	while (!zresult->s.compcode)
+		;
+
+	if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
+		op->status = RTE_COMP_OP_STATUS_SUCCESS;
+	} else {
+		/* Fatal error; report the failure code to the application */
+		ZIP_PMD_ERR("operation failed with error code: %d\n",
+			zresult->s.compcode);
+		if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
+			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+		else
+			op->status = RTE_COMP_OP_STATUS_ERROR;
+	}
+
+	ZIP_PMD_INFO("ret %d,written %d\n", ret, zresult->s.totalbyteswritten);
+
+	op->produced = zresult->s.totalbyteswritten;
+	op->consumed = zresult->s.totalbytesread;
+
+	if (zresult->s.ef == 1)
+		reset_stream(zstrm);
+
+	zresult->s.compcode = 0;
+	return ret;
+}
+
 /** Parse xform parameters and setup a stream */
 int
 zip_set_stream_parameters(struct rte_compressdev *dev,
@@ -399,6 +460,56 @@ zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
 	return 0;
 }
 
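+/*
+ * Enqueue burst: process each stateless op synchronously and place the
+ * completed op, with its status, on the completion ring.
+ */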
+static uint16_t
+zip_pmd_enqueue_burst_sync(void *queue_pair,
+		struct rte_comp_op **ops, uint16_t nb_ops)
+{
+	struct zipvf_qp *qp = queue_pair;
+	struct rte_comp_op *op;
+	struct zip_stream *zstrm;
+	int ret, i;
+	uint16_t enqd = 0;
+
+	for (i = 0; i < nb_ops; i++) {
+		op = ops[i];
+		if (op->op_type == RTE_COMP_OP_STATEFUL)
+			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+		else {
+			/* process stateless ops; the result is reported
+			 * through op->status
+			 */
+			zstrm = (struct zip_stream *)op->private_xform;
+			zstrm->func(op, qp, zstrm);
+		}
+
+		/* Whatever the outcome of the op, push it onto the
+		 * completion ring along with its status
+		 */
+		ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
+		if (unlikely(ret < 0)) {
+			/* increment error count if the op could not be queued */
+			qp->qp_stats.enqueue_err_count++;
+		} else {
+			qp->qp_stats.enqueued_count++;
+			enqd++;
+		}
+	}
+	return enqd;
+}
+
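+/* Dequeue burst: drain completed ops from the completion ring */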
+static uint16_t
+zip_pmd_dequeue_burst_sync(void *queue_pair,
+		struct rte_comp_op **ops, uint16_t nb_ops)
+{
+	struct zipvf_qp *qp = queue_pair;
+	unsigned int nb_dequeued = 0;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+			(void **)ops, nb_ops, NULL);
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
+
 struct rte_compressdev_ops octtx_zip_pmd_ops = {
 		.dev_configure		= zip_pmd_config,
 		.dev_start		= zip_pmd_start,
diff --git a/drivers/compress/octeontx/zipvf.c b/drivers/compress/octeontx/zipvf.c
index 436e80b78..d50f36d64 100644
--- a/drivers/compress/octeontx/zipvf.c
+++ b/drivers/compress/octeontx/zipvf.c
@@ -85,6 +85,55 @@ int zipvf_q_term(struct zipvf_qp *qp)
 	return 0;
 }
 
+
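+/*
+ * Copy an instruction into the VF command queue and ring the doorbell.
+ * The queue is a chunked ring: the last word of each chunk holds a
+ * next-chunk pointer rather than command data.
+ */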
+int zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *cmd)
+{
+	zip_quex_doorbell_t dbell;
+	union zip_nptr_s ncp;
+	uint64_t *ncb_ptr;
+	struct zipvf_cmdq *cmdq = &qp->cmdq;
+	void *reg_base = qp->vf->vbar0;
+
+	/* Hold the queue lock */
+	rte_spinlock_lock(&(cmdq->qlock));
+
+	/* Check space availability in the zip command queue; the last
+	 * 8 bytes of the queue are reserved for the next-chunk pointer
+	 */
+	if ((((cmdq->sw_head - (uint64_t *)cmdq->va) * sizeof(uint64_t)) +
+		ZIP_CMD_SIZE) == (ZIP_MAX_CMDQ_SIZE - 8)) {
+		/* Last command slot in the queue chunk */
+		memcpy((uint8_t *)cmdq->sw_head,
+			(uint8_t *)cmd,
+			sizeof(union zip_inst_s));
+		/* move pointer to next loc in unit of 64-bit word */
+		cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+
+		/* sw_head now points at the next-chunk pointer slot;
+		 * fill it so the hardware wraps to the queue base
+		 */
+		ncb_ptr = cmdq->sw_head;
+		/* Point head back to the command queue base */
+		cmdq->sw_head = (uint64_t *)cmdq->va;
+
+		ncp.u = 0ull;
+		ncp.s.addr = cmdq->iova;
+		*ncb_ptr = ncp.u;
+	} else {
+		/* Enough space available in the command queue */
+		memcpy((uint8_t *)cmdq->sw_head,
+			(uint8_t *)cmd,
+			sizeof(union zip_inst_s));
+		cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+	}
+
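+	/* Ensure the command is visible to the device before the doorbell */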
+	rte_wmb();
+
+	/* Ring the ZIP VF doorbell to submit the command */
+	dbell.u = 0ull;
+	dbell.s.dbell_cnt = 1;
+	zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u);
+
+	rte_spinlock_unlock(&(cmdq->qlock));
+	return 0;
+}
+
 int zipvf_create(struct rte_compressdev *compressdev, int vfid)
 {
 	struct   rte_pci_device *pdev = RTE_DEV_TO_PCI(compressdev->device);
diff --git a/drivers/compress/octeontx/zipvf.h b/drivers/compress/octeontx/zipvf.h
index 2388e2947..849094fc6 100644
--- a/drivers/compress/octeontx/zipvf.h
+++ b/drivers/compress/octeontx/zipvf.h
@@ -162,10 +162,174 @@ struct zip_vf {
 	/* pointer to pools */
 } __rte_cache_aligned;
 
+
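+/*
+ * Set up the instruction's input pointer. A contiguous mbuf is passed
+ * directly; a multi-segment mbuf is described through a gather list
+ * built in the stream's scratch buffer.
+ */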
+static inline int
+zipvf_prepare_in_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+	uint32_t offset, inlen;
+	union zip_zptr_s *sg_list = NULL;
+	struct rte_mbuf *m_src;
+	union zip_inst_s *inst = zstrm->inst;
+	rte_iova_t iova;
+
+	inlen = op->src.length;
+	offset = op->src.offset;
+	m_src = op->m_src;
+
+	if (m_src->nb_segs == 1) {
+		/* Prepare direct input data pointer */
+		inst->s.dg = 0;
+		inst->s.inp_ptr_addr.s.addr =
+			rte_pktmbuf_iova_offset(m_src, offset);
+		inst->s.inp_ptr_ctl.s.length = inlen;
+		return 0;
+	}
+
+	ZIP_PMD_ERR("Input packet is segmented\n");
+
+	/* Packet is segmented, create gather buffer */
+	inst->s.dg = 1;
+	iova = rte_mempool_virt2iova(zstrm->bufs[IN_DATA_BUF]);
+	if (iova & 0xF) {
+		/* Align it to 16 Byte address */
+		iova = ZIP_ALIGN_ROUNDUP(iova, ZIP_SGPTR_ALIGN);
+	}
+
+	inst->s.inp_ptr_addr.s.addr = iova;
+	inst->s.inp_ptr_ctl.s.length = (m_src->nb_segs < MAX_SG_LEN) ?
+					(m_src->nb_segs) : MAX_SG_LEN;
+
+	sg_list = (union zip_zptr_s *)iova;
+
+	int i = 0;
+	rte_iova_t addr;
+	uint16_t len;
+
+	while (i < inst->s.inp_ptr_ctl.s.length) {
+		addr = rte_pktmbuf_iova_offset(m_src, offset);
+		len = rte_pktmbuf_data_len(m_src) - offset;
+		if (len > inlen)
+			len = inlen;
+		sg_list[i].s.addr = addr;
+		sg_list[i].s.length = len;
+		i++;
+		inlen -= len;
+		/* the offset applies to the first segment only */
+		m_src = m_src->next;
+		offset = 0;
+	}
+	return 0;
+}
+
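+/*
+ * Set up the instruction's output pointer, mirroring the input path:
+ * a direct pointer for a contiguous mbuf, a scatter list otherwise.
+ */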
+static inline int
+zipvf_prepare_out_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+	uint32_t offset;
+	union zip_zptr_s *sg_list = NULL;
+	struct rte_mbuf *m_dst;
+	union zip_inst_s *inst = zstrm->inst;
+	rte_iova_t iova;
+
+	offset = op->dst.offset;
+	m_dst = op->m_dst;
+
+	if (m_dst->nb_segs == 1) {
+		/* Prepare direct output data pointer */
+		inst->s.ds = 0;
+		inst->s.out_ptr_addr.s.addr =
+			rte_pktmbuf_iova_offset(m_dst, offset);
+		inst->s.totaloutputlength = rte_pktmbuf_data_len(m_dst) -
+					    op->dst.offset;
+		inst->s.out_ptr_ctl.s.length = inst->s.totaloutputlength;
+		return 0;
+	}
+
+	ZIP_PMD_ERR("output packet is segmented\n");
+
+	/* Packet is segmented, create gather buffer */
+	inst->s.ds = 1;
+	iova = rte_mempool_virt2iova(zstrm->bufs[OUT_DATA_BUF]);
+	if (iova & 0xF) {
+		/* Align it to 16 Byte address */
+		iova = ZIP_ALIGN_ROUNDUP(iova, ZIP_SGPTR_ALIGN);
+	}
+
+	inst->s.out_ptr_addr.s.addr = iova;
+	inst->s.out_ptr_ctl.s.length = (m_dst->nb_segs < MAX_SG_LEN) ?
+					(m_dst->nb_segs) : MAX_SG_LEN;
+
+	sg_list = (union zip_zptr_s *)iova;
+
+	int i = 0;
+
+	inst->s.totaloutputlength = 0;
+	while (i < inst->s.out_ptr_ctl.s.length) {
+		sg_list[i].s.addr = rte_pktmbuf_iova_offset(m_dst, offset);
+		sg_list[i].s.length = rte_pktmbuf_data_len(m_dst) - offset;
+		inst->s.totaloutputlength += sg_list[i].s.length;
+		/* the offset applies to the first segment only */
+		m_dst = m_dst->next;
+		offset = 0;
+		i++;
+	}
+
+	return 0;
+}
+
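+/*
+ * Prepare the instruction for a stateless op: every op carries a
+ * complete stream (ef = 1), decompression additionally sets the sf
+ * flag, and the input/output buffer pointers are filled in.
+ */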
+static inline int
+zipvf_prepare_cmd_stateless(struct rte_comp_op *op, struct zip_stream *zstrm)
+{
+	union zip_inst_s *inst = zstrm->inst;
+
+	/* A stateless op always carries a complete stream; set final flag */
+	inst->s.ef = 1;
+
+	if (inst->s.op == ZIP_OP_E_DECOMP)
+		inst->s.sf = 1;
+	else
+		inst->s.sf = 0;
+
+	/* Set input checksum */
+	inst->s.adlercrc32 = op->input_chksum;
+
+	/* Prepare input and output pointers (gather/scatter if segmented) */
+	zipvf_prepare_in_buf(zstrm, op);
+	zipvf_prepare_out_buf(zstrm, op);
+
+	return 0;
+}
+
+#ifdef ZIP_DBG
+static inline void zip_dump_instruction(void *inst)
+{
+	union zip_inst_s *cmd83 = (union zip_inst_s *)inst;
+
+	printf("####### START ########\n");
+	printf("doneint:%d totaloutputlength:%d\n", cmd83->s.doneint,
+		cmd83->s.totaloutputlength);
+	printf("exnum:%d iv:%d exbits:%d hmif:%d halg:%d\n", cmd83->s.exn,
+		cmd83->s.iv, cmd83->s.exbits, cmd83->s.hmif, cmd83->s.halg);
+	printf("flush:%d speed:%d cc:%d\n", cmd83->s.sf,
+		cmd83->s.ss, cmd83->s.cc);
+	printf("eof:%d bof:%d op:%d dscatter:%d dgather:%d hgather:%d\n",
+		cmd83->s.ef, cmd83->s.bf, cmd83->s.op, cmd83->s.ds,
+		cmd83->s.dg, cmd83->s.hg);
+	printf("historylength:%d adler32:%d\n", cmd83->s.historylength,
+		cmd83->s.adlercrc32);
+	printf("ctx_ptr.addr:0x%lx\n", cmd83->s.ctx_ptr_addr.s.addr);
+	printf("ctx_ptr.len:%d\n", cmd83->s.ctx_ptr_ctl.s.length);
+	printf("history_ptr.addr:0x%lx\n", cmd83->s.his_ptr_addr.s.addr);
+	printf("history_ptr.len:%d\n", cmd83->s.his_ptr_ctl.s.length);
+	printf("inp_ptr.addr:0x%lx\n", cmd83->s.inp_ptr_addr.s.addr);
+	printf("inp_ptr.len:%d\n", cmd83->s.inp_ptr_ctl.s.length);
+	printf("out_ptr.addr:0x%lx\n", cmd83->s.out_ptr_addr.s.addr);
+	printf("out_ptr.len:%d\n", cmd83->s.out_ptr_ctl.s.length);
+	printf("result_ptr.len:%d\n", cmd83->s.res_ptr_ctl.s.length);
+	printf("####### END ########\n");
+}
+#endif
+
 int zipvf_create(struct rte_compressdev *compressdev, int vfid);
 int zipvf_destroy(struct rte_compressdev *compressdev);
 int zipvf_q_init(struct zipvf_qp *qp);
 int zipvf_q_term(struct zipvf_qp *qp);
+int zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *zcmd);
 int zip_set_stream_parameters(struct rte_compressdev *dev,
 				const struct rte_comp_xform *xform,
 				struct zip_stream *z_stream);
-- 
2.14.3
