From: Ashish Gupta <Ashish.Gupta@caviumnetworks.com>
To: pablo.de.lara.guarch@intel.com
Cc: dev@dpdk.org, narayanaprasad.athreya@cavium.com,
mahipal.challa@cavium.com,
Ashish Gupta <ashish.gupta@caviumnetworks.com>,
Shally Verma <shally.verma@caviumnetworks.com>,
Sunila Sahu <sunila.sahu@caviumnetworks.com>
Subject: [dpdk-dev] [PATCH v3 3/6] compress/octeontx: add xform and stream create support
Date: Sat, 21 Jul 2018 00:34:44 +0530 [thread overview]
Message-ID: <20180720190447.7979-4-Ashish.Gupta@caviumnetworks.com> (raw)
In-Reply-To: <20180720190447.7979-1-Ashish.Gupta@caviumnetworks.com>
Implement non-shareable private xform support so that applications
can perform stateless compression/decompression operations.
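
For context, a minimal usage sketch of this path through the generic
compressdev API follows. It is untested and illustrative only: the helper
name, dev_id, queue pair 0, and the pre-created op mempool and mbufs are
assumptions, not part of this patch; dequeue and status handling via
rte_compressdev_dequeue_burst() are omitted for brevity.

#include <rte_mbuf.h>
#include <rte_comp.h>
#include <rte_compressdev.h>

/* Illustrative helper: dev_id, queue pair 0, the op mempool and the
 * mbufs are assumed to be configured already by the application.
 */
static int
compress_one_op(uint8_t dev_id, struct rte_mbuf *src, struct rte_mbuf *dst,
		struct rte_mempool *op_pool)
{
	struct rte_comp_xform xform = {
		.type = RTE_COMP_COMPRESS,
		.compress = {
			.algo = RTE_COMP_ALGO_DEFLATE,
			.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
			.chksum = RTE_COMP_CHECKSUM_NONE,
			.window_size = 15,
			.hash_algo = RTE_COMP_HASH_ALGO_NONE,
		},
	};
	void *priv_xform = NULL;
	struct rte_comp_op *op;

	/* Reaches zip_pmd_stream_create() through the PMD ops table */
	if (rte_compressdev_private_xform_create(dev_id, &xform,
			&priv_xform) < 0)
		return -1;

	op = rte_comp_op_alloc(op_pool);
	if (op == NULL)
		return -1;

	op->op_type = RTE_COMP_OP_STATELESS;
	op->flush_flag = RTE_COMP_FLUSH_FINAL;
	op->private_xform = priv_xform;
	op->m_src = src;
	op->m_dst = dst;
	op->src.offset = 0;
	op->src.length = rte_pktmbuf_pkt_len(src);
	op->dst.offset = 0;

	return rte_compressdev_enqueue_burst(dev_id, 0, &op, 1) == 1 ? 0 : -1;
}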
Signed-off-by: Ashish Gupta <ashish.gupta@caviumnetworks.com>
Signed-off-by: Shally Verma <shally.verma@caviumnetworks.com>
Signed-off-by: Sunila Sahu <sunila.sahu@caviumnetworks.com>
---
drivers/compress/octeontx/otx_zip.h | 19 +++-
drivers/compress/octeontx/otx_zip_pmd.c | 149 ++++++++++++++++++++++++++++++++
2 files changed, 167 insertions(+), 1 deletion(-)
diff --git a/drivers/compress/octeontx/otx_zip.h b/drivers/compress/octeontx/otx_zip.h
index 1289919cb..3fcd86a86 100644
--- a/drivers/compress/octeontx/otx_zip.h
+++ b/drivers/compress/octeontx/otx_zip.h
@@ -90,9 +90,24 @@ enum {
MAX_BUFS_PER_STREAM
} NUM_BUFS_PER_STREAM;
-
+struct zip_stream;
struct zipvf_qp;
+/* Algorithm handler function prototype */
+typedef int (*comp_func_t)(struct rte_comp_op *op,
+		struct zipvf_qp *qp, struct zip_stream *zstrm);
+
+/**
+ * ZIP private stream structure
+ */
+struct zip_stream {
+	union zip_inst_s *inst;
+	/* zip instruction pointer */
+	comp_func_t func;
+	/* function to process comp operation */
+	void *bufs[MAX_BUFS_PER_STREAM];
+} __rte_cache_aligned;
+
/**
* ZIP instruction Queue
@@ -156,6 +171,8 @@ zipvf_q_init(struct zipvf_qp *qp);
int
zipvf_q_term(struct zipvf_qp *qp);
+int
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *zcmd);
uint64_t
zip_reg_read64(uint8_t *hw_addr, uint64_t offset);
diff --git a/drivers/compress/octeontx/otx_zip_pmd.c b/drivers/compress/octeontx/otx_zip_pmd.c
index f6285508a..fac547920 100644
--- a/drivers/compress/octeontx/otx_zip_pmd.c
+++ b/drivers/compress/octeontx/otx_zip_pmd.c
@@ -28,6 +28,103 @@ static const struct rte_compressdev_capabilities
RTE_COMP_END_OF_CAPABILITIES_LIST()
};
+/** Parse xform parameters and set up a stream */
+static int
+zip_set_stream_parameters(struct rte_compressdev *dev,
+		const struct rte_comp_xform *xform,
+		struct zip_stream *z_stream)
+{
+	int ret;
+	union zip_inst_s *inst;
+	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+	void *res;
+
+	/* Allocate resources required by a stream */
+	ret = rte_mempool_get_bulk(vf->zip_mp,
+			z_stream->bufs, MAX_BUFS_PER_STREAM);
+	if (ret < 0)
+		return -1;
+
+	/* get one command buffer from the pool and set it up */
+	inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
+	res = z_stream->bufs[RES_BUF];
+
+	memset(inst->u, 0, sizeof(inst->u));
+
+	/* set bf only for the first op of the stream */
+	inst->s.bf = 1;
+
+	if (xform->type == RTE_COMP_COMPRESS) {
+		inst->s.op = ZIP_OP_E_COMP;
+
+		switch (xform->compress.deflate.huffman) {
+		case RTE_COMP_HUFFMAN_DEFAULT:
+			inst->s.cc = ZIP_CC_DEFAULT;
+			break;
+		case RTE_COMP_HUFFMAN_FIXED:
+			inst->s.cc = ZIP_CC_FIXED_HUFF;
+			break;
+		case RTE_COMP_HUFFMAN_DYNAMIC:
+			inst->s.cc = ZIP_CC_DYN_HUFF;
+			break;
+		default:
+			ret = -1;
+			goto err;
+		}
+
+		switch (xform->compress.level) {
+		case RTE_COMP_LEVEL_MIN:
+			inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
+			break;
+		case RTE_COMP_LEVEL_MAX:
+			inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
+			break;
+		case RTE_COMP_LEVEL_NONE:
+			ZIP_PMD_ERR("Compression level not supported");
+			ret = -1;
+			goto err;
+		default:
+			/* for any value between min and max, choose
+			 * the PMD default.
+			 */
+			inst->s.ss = ZIP_COMP_E_LEVEL_MED; /* PMD default */
+			break;
+		}
+	} else if (xform->type == RTE_COMP_DECOMPRESS) {
+		inst->s.op = ZIP_OP_E_DECOMP;
+		/* from HRM:
+		 * For DEFLATE decompression, [CC] must be 0x0.
+		 * For decompression, [SS] must be 0x0.
+		 */
+		inst->s.cc = 0;
+		/* Speed bit should not be set for decompression */
+		inst->s.ss = 0;
+		/* decompression context is supported only for STATEFUL
+		 * operations. Currently only STATELESS is supported, so
+		 * skip setting the ctx pointer.
+		 */
+
+	} else {
+		ZIP_PMD_ERR("xform type not supported");
+		ret = -1;
+		goto err;
+	}
+
+	inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
+	inst->s.res_ptr_ctl.s.length = 0;
+
+	z_stream->inst = inst;
+
+	return 0;
+
+err:
+	rte_mempool_put_bulk(vf->zip_mp,
+			(void *)&(z_stream->bufs[0]),
+			MAX_BUFS_PER_STREAM);
+
+	return ret;
+}
+
/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
@@ -253,6 +350,53 @@ zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
return -1;
}
+static int
+zip_pmd_stream_create(struct rte_compressdev *dev,
+		const struct rte_comp_xform *xform, void **stream)
+{
+	int ret;
+	struct zip_stream *strm = NULL;
+
+	strm = rte_malloc(NULL,
+			sizeof(struct zip_stream), 0);
+
+	if (strm == NULL)
+		return (-ENOMEM);
+
+	ret = zip_set_stream_parameters(dev, xform, strm);
+	if (ret < 0) {
+		ZIP_PMD_ERR("failed to configure xform parameters");
+		rte_free(strm);
+		return ret;
+	}
+	*stream = strm;
+	return 0;
+}
+
+static int
+zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
+{
+	struct zip_vf *vf = (struct zip_vf *)(dev->data->dev_private);
+	struct zip_stream *z_stream;
+
+	if (stream == NULL)
+		return 0;
+
+	z_stream = (struct zip_stream *)stream;
+
+	/* Free resources back to pool */
+	rte_mempool_put_bulk(vf->zip_mp,
+			(void *)&(z_stream->bufs[0]),
+			MAX_BUFS_PER_STREAM);
+
+	/* Zero out the whole structure */
+	memset(stream, 0, sizeof(struct zip_stream));
+	rte_free(stream);
+
+	return 0;
+}
+
+
struct rte_compressdev_ops octtx_zip_pmd_ops = {
.dev_configure = zip_pmd_config,
.dev_start = zip_pmd_start,
@@ -266,6 +410,11 @@ struct rte_compressdev_ops octtx_zip_pmd_ops = {
.queue_pair_setup = zip_pmd_qp_setup,
.queue_pair_release = zip_pmd_qp_release,
+
+	.private_xform_create = zip_pmd_stream_create,
+	.private_xform_free = zip_pmd_stream_free,
+	.stream_create = NULL,
+	.stream_free = NULL
};
static int
--
2.14.3
Thread overview: 9+ messages
2018-07-20 19:04 [dpdk-dev] [PATCH v3 0/6] compress: add Octeontx ZIP compression PMD Ashish Gupta
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 1/6] compress/octeontx: add octeontx zip PMD Ashish Gupta
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 2/6] compress/octeontx: add device setup PMD ops Ashish Gupta
2018-07-20 19:04 ` Ashish Gupta [this message]
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 4/6] compress/octeontx: add ops enq deq apis Ashish Gupta
2018-07-23 22:40 ` De Lara Guarch, Pablo
2018-07-24 8:22 ` Verma, Shally
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 5/6] doc: add Octeonx zip guide Ashish Gupta
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 6/6] usertools: update devbind for octeontx zip device Ashish Gupta