DPDK patches and discussions
From: Ashish Gupta <Ashish.Gupta@caviumnetworks.com>
To: pablo.de.lara.guarch@intel.com
Cc: dev@dpdk.org, narayanaprasad.athreya@cavium.com,
	mahipal.challa@cavium.com,
	Ashish Gupta <ashish.gupta@caviumnetworks.com>,
	Shally Verma <shally.verma@caviumnetworks.com>,
	Sunila Sahu <sunila.sahu@caviumnetworks.com>
Subject: [dpdk-dev] [PATCH v3 2/6] compress/octeontx: add device setup PMD ops
Date: Sat, 21 Jul 2018 00:34:43 +0530
Message-ID: <20180720190447.7979-3-Ashish.Gupta@caviumnetworks.com>
In-Reply-To: <20180720190447.7979-1-Ashish.Gupta@caviumnetworks.com>

Implement the compressdev PMD ops for device configuration, start, stop, close, statistics, device info, and queue pair setup/release in the octeontx ZIP PMD.

Signed-off-by: Ashish Gupta <ashish.gupta@caviumnetworks.com>
Signed-off-by: Shally Verma <shally.verma@caviumnetworks.com>
Signed-off-by: Sunila Sahu <sunila.sahu@caviumnetworks.com>
---
 drivers/compress/octeontx/otx_zip.c     |  71 +++++++++
 drivers/compress/octeontx/otx_zip.h     |  57 ++++++-
 drivers/compress/octeontx/otx_zip_pmd.c | 253 ++++++++++++++++++++++++++++++++
 3 files changed, 379 insertions(+), 2 deletions(-)
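
Note (illustrative, not part of the patch): the ops added here are driven through
the generic compressdev API. A minimal sketch of the expected call flow is below;
the device id, ring depth and xform/stream counts are assumptions chosen for the
example, not values required by this driver.

#include <rte_compressdev.h>

/* Illustrative only: configure the octeontx ZIP compressdev and one queue
 * pair. The counts used here are example assumptions, not driver requirements.
 */
static int
setup_zip_dev(uint8_t dev_id, int socket_id)
{
	struct rte_compressdev_config cfg = {
		.socket_id = socket_id,
		.nb_queue_pairs = 1,
		.max_nb_priv_xforms = 16,	/* pooled together with streams */
		.max_nb_streams = 16,
	};
	int ret;

	/* Reaches zip_pmd_config(): creates the per-device resource mempool */
	ret = rte_compressdev_configure(dev_id, &cfg);
	if (ret < 0)
		return ret;

	/* Reaches zip_pmd_qp_setup(): allocates the completion ring and
	 * programs ZIP_VQ_SBUF_ADDR/ZIP_VQ_ENA via zipvf_q_init()
	 */
	ret = rte_compressdev_queue_pair_setup(dev_id, 0, 64, socket_id);
	if (ret < 0)
		return ret;

	return rte_compressdev_start(dev_id);
}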

diff --git a/drivers/compress/octeontx/otx_zip.c b/drivers/compress/octeontx/otx_zip.c
index 4a5591c97..255b095de 100644
--- a/drivers/compress/octeontx/otx_zip.c
+++ b/drivers/compress/octeontx/otx_zip.c
@@ -18,6 +18,77 @@ zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val)
 	*(uint64_t *)(base + offset) = val;
 }
 
+static void
+zip_q_enable(struct zipvf_qp *qp)
+{
+	zip_vqx_ena_t que_ena;
+
+	/* Enable the ZIP VFx command queue */
+	que_ena.u = 0ull;
+	que_ena.s.ena = 1;
+
+	zip_reg_write64(qp->vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+	rte_wmb();
+}
+
+/* Initialize the given queue pair on the ZIP device */
+int
+zipvf_q_init(struct zipvf_qp *qp)
+{
+	zip_vqx_sbuf_addr_t que_sbuf_addr;
+
+	uint64_t size;
+	void *cmdq_addr;
+	uint64_t iova;
+	struct zipvf_cmdq *cmdq = &qp->cmdq;
+	struct zip_vf *vf = qp->vf;
+
+	/* Allocate and set up the instruction queue */
+	size = ZIP_MAX_CMDQ_SIZE;
+	size = ZIP_ALIGN_ROUNDUP(size, ZIP_CMDQ_ALIGN);
+
+	cmdq_addr = rte_zmalloc(qp->name, size, ZIP_CMDQ_ALIGN);
+	if (cmdq_addr == NULL)
+		return -1;
+
+	cmdq->sw_head = (uint64_t *)cmdq_addr;
+	cmdq->va = (uint8_t *)cmdq_addr;
+	iova = rte_mem_virt2iova(cmdq_addr);
+
+	cmdq->iova = iova;
+
+	que_sbuf_addr.u = 0ull;
+	que_sbuf_addr.s.ptr = (cmdq->iova >> 7);
+	zip_reg_write64(vf->vbar0, ZIP_VQ_SBUF_ADDR, que_sbuf_addr.u);
+
+	zip_q_enable(qp);
+
+	memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+	rte_spinlock_init(&cmdq->qlock);
+
+	return 0;
+}
+
+int
+zipvf_q_term(struct zipvf_qp *qp)
+{
+	struct zipvf_cmdq *cmdq = &qp->cmdq;
+	zip_vqx_ena_t que_ena;
+	struct zip_vf *vf = qp->vf;
+
+	if (cmdq->va != NULL) {
+		memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+		rte_free(cmdq->va);
+	}
+
+	/* Disable the ZIP queue */
+	que_ena.u = 0ull;
+	zip_reg_write64(vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+
+	return 0;
+}
+
+
 int
 zipvf_create(struct rte_compressdev *compressdev)
 {
diff --git a/drivers/compress/octeontx/otx_zip.h b/drivers/compress/octeontx/otx_zip.h
index 8a58f31f8..1289919cb 100644
--- a/drivers/compress/octeontx/otx_zip.h
+++ b/drivers/compress/octeontx/otx_zip.h
@@ -77,8 +77,54 @@ int octtx_zip_logtype_driver;
 	ZIP_PMD_LOG(INFO, fmt, ## args)
 #define ZIP_PMD_ERR(fmt, args...) \
 	ZIP_PMD_LOG(ERR, fmt, ## args)
-#define ZIP_PMD_WARN(fmt, args...) \
-	ZIP_PMD_LOG(WARNING, fmt, ## args)
+
+/* resources required to process stream */
+enum {
+	RES_BUF = 0,
+	CMD_BUF,
+	HASH_CTX_BUF,
+	DECOMP_CTX_BUF,
+	IN_DATA_BUF,
+	OUT_DATA_BUF,
+	HISTORY_DATA_BUF,
+	MAX_BUFS_PER_STREAM
+} NUM_BUFS_PER_STREAM;
+
+
+struct zipvf_qp;
+
+
+/**
+ * ZIP instruction Queue
+ */
+struct zipvf_cmdq {
+	rte_spinlock_t qlock;
+	/* queue lock */
+	uint64_t *sw_head;
+	/* pointer to the head of the 8-byte-word instruction queue */
+	uint8_t *va;
+	/* virtual address of the instruction queue */
+	rte_iova_t iova;
+	/* IOVA address of the instruction queue head */
+};
+
+/**
+ * ZIP device queue structure
+ */
+struct zipvf_qp {
+	struct zipvf_cmdq cmdq;
+	/* Hardware instruction queue structure */
+	struct rte_ring *processed_pkts;
+	/* Ring for placing processed packets */
+	struct rte_compressdev_stats qp_stats;
+	/* Queue pair statistics */
+	uint16_t id;
+	/* Queue Pair Identifier */
+	const char *name;
+	/* Unique Queue Pair Name */
+	struct zip_vf *vf;
+	/* pointer to the device this queue pair belongs to */
+} __rte_cache_aligned;
 
 /**
  * ZIP VF device structure.
@@ -104,6 +150,13 @@ zipvf_create(struct rte_compressdev *compressdev);
 int
 zipvf_destroy(struct rte_compressdev *compressdev);
 
+int
+zipvf_q_init(struct zipvf_qp *qp);
+
+int
+zipvf_q_term(struct zipvf_qp *qp);
+
+
 uint64_t
 zip_reg_read64(uint8_t *hw_addr, uint64_t offset);
 
diff --git a/drivers/compress/octeontx/otx_zip_pmd.c b/drivers/compress/octeontx/otx_zip_pmd.c
index b2cb115e0..f6285508a 100644
--- a/drivers/compress/octeontx/otx_zip_pmd.c
+++ b/drivers/compress/octeontx/otx_zip_pmd.c
@@ -11,8 +11,261 @@
 
 #include "otx_zip.h"
 
+static const struct rte_compressdev_capabilities
+				octtx_zip_pmd_capabilities[] = {
+	{	.algo = RTE_COMP_ALGO_DEFLATE,
+		/* Deflate */
+		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
+					RTE_COMP_FF_HUFFMAN_DYNAMIC,
+		/* Non-shareable priv_xform and stateless operation only */
+		.window_size = {
+				.min = 1,
+				.max = 14,
+				.increment = 1
+				/* size supported 2^1 to 2^14 */
+		},
+	},
+	RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+zip_pmd_config(struct rte_compressdev *dev,
+		struct rte_compressdev_config *config)
+{
+	int nb_streams;
+	char res_pool[RTE_MEMZONE_NAMESIZE];
+	struct zip_vf *vf;
+	struct rte_mempool *zip_buf_mp;
+
+	if (!config || !dev)
+		return -EIO;
+
+	vf = (struct zip_vf *)(dev->data->dev_private);
+
+	/* Create a pool with the maximum number of resources
+	 * required per stream
+	 */
+
+	/* use common pool for non-shareable priv_xform and stream */
+	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
+
+	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
+		 dev->data->dev_id);
+
+	/* TBD: should we use the per-core object cache for stream resources? */
+	zip_buf_mp = rte_mempool_create(
+			res_pool,
+			nb_streams * MAX_BUFS_PER_STREAM,
+			ZIP_BUF_SIZE,
+			0,
+			0,
+			NULL,
+			NULL,
+			NULL,
+			NULL,
+			SOCKET_ID_ANY,
+			0);
+
+	if (zip_buf_mp == NULL) {
+		ZIP_PMD_ERR(
+			"Failed to create buf mempool octtx_zip_res_pool%u",
+			dev->data->dev_id);
+		return -1;
+	}
+
+	vf->zip_mp = zip_buf_mp;
+
+	return 0;
+}
+
+/** Start device */
+static int
+zip_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+
+}
+
+/** Close device */
+static int
+zip_pmd_close(struct rte_compressdev *dev)
+{
+	if (dev == NULL)
+		return -1;
+
+	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+	rte_mempool_free(vf->zip_mp);
+
+	return 0;
+}
+
+/** Get device statistics */
+static void
+zip_pmd_stats_get(struct rte_compressdev *dev,
+		struct rte_compressdev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+zip_pmd_stats_reset(struct rte_compressdev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+/** Get device info */
+static void
+zip_pmd_info_get(struct rte_compressdev *dev,
+		struct rte_compressdev_info *dev_info)
+{
+	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_name = dev->device->driver->name;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = octtx_zip_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
+	}
+}
+
+/** Release queue pair */
+static int
+zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+	if (qp != NULL) {
+		zipvf_q_term(qp);
+
+		if (qp->processed_pkts)
+			rte_ring_free(qp->processed_pkts);
+
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
+		unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			ZIP_PMD_INFO("Reusing existing ring %s for processed"
+					" packets", qp->name);
+			return r;
+		}
+
+		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
+				" packets", qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+						RING_F_EXACT_SZ);
+}
+
+/** Set up a queue pair */
+static int
+zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+		uint32_t max_inflight_ops, int socket_id)
+{
+	struct zipvf_qp *qp = NULL;
+	struct zip_vf *vf;
+	char *name;
+	int ret;
+
+	if (!dev)
+		return -1;
+
+	vf = (struct zip_vf *) (dev->data->dev_private);
+
+	/* Reuse the queue pair if it is already set up. */
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		ZIP_PMD_INFO("Using existing queue pair %d", qp_id);
+		return 0;
+	}
+
+	name =  rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
+	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+		 "zip_pmd_%u_qp_%u",
+		 dev->data->dev_id, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket(name, sizeof(*qp),
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return (-ENOMEM);
+
+	qp->name = name;
+
+	/* Create a completion queue up to max_inflight_ops deep */
+	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
+						max_inflight_ops, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	qp->id = qp_id;
+	qp->vf = vf;
+
+	ret = zipvf_q_init(qp);
+	if (ret < 0)
+		goto qp_setup_cleanup;
+
+	dev->data->queue_pairs[qp_id] = qp;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	if (qp->processed_pkts)
+		rte_ring_free(qp->processed_pkts);
+	if (qp)
+		rte_free(qp);
+	return -1;
+}
+
 struct rte_compressdev_ops octtx_zip_pmd_ops = {
+		.dev_configure		= zip_pmd_config,
+		.dev_start		= zip_pmd_start,
+		.dev_stop		= zip_pmd_stop,
+		.dev_close		= zip_pmd_close,
+
+		.stats_get		= zip_pmd_stats_get,
+		.stats_reset		= zip_pmd_stats_reset,
+
+		.dev_infos_get		= zip_pmd_info_get,
 
+		.queue_pair_setup	= zip_pmd_qp_setup,
+		.queue_pair_release	= zip_pmd_qp_release,
 };
 
 static int
-- 
2.14.3

Thread overview: 9+ messages
2018-07-20 19:04 [dpdk-dev] [PATCH v3 0/6] compress: add Octeontx ZIP compression PMD Ashish Gupta
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 1/6] compress/octeontx: add octeontx zip PMD Ashish Gupta
2018-07-20 19:04 ` Ashish Gupta [this message]
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 3/6] compress/octeontx: add xform and stream create support Ashish Gupta
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 4/6] compress/octeontx: add ops enq deq apis Ashish Gupta
2018-07-23 22:40   ` De Lara Guarch, Pablo
2018-07-24  8:22     ` Verma, Shally
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 5/6] doc: add Octeonx zip guide Ashish Gupta
2018-07-20 19:04 ` [dpdk-dev] [PATCH v3 6/6] usertools: update devbind for octeontx zip device Ashish Gupta
