DPDK patches and discussions
From: Shally Verma <shally.verma@caviumnetworks.com>
To: pablo.de.lara.guarch@intel.com
Cc: dev@dpdk.org, pathreya@caviumnetworks.com,
	mchalla@caviumnetworks.com, ashish.gupta@caviumnetworks.com,
	sunila.sahu@caviumnetworks.com
Subject: [dpdk-dev] [PATCH v3 2/5] compress/zlib: add device PMD ops
Date: Sat, 21 Jul 2018 23:47:46 +0530	[thread overview]
Message-ID: <1532197069-24224-3-git-send-email-shally.verma@caviumnetworks.com> (raw)
In-Reply-To: <1532197069-24224-1-git-send-email-shally.verma@caviumnetworks.com>

From: Ashish Gupta <ashish.gupta@caviumnetworks.com>

Implement the device PMD ops: configure, start, stop, close,
stats get/reset, info get, and queue pair setup/release.
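
As context for reviewers (not part of the patch), below is a minimal sketch of
how an application is expected to reach these ops through the rte_compressdev
API once the vdev is probed. The helper name, the queue depth and the mempool
sizing are illustrative assumptions, not values taken from the patch.

#include <rte_compressdev.h>

/* Illustrative sizing only; real applications pick their own values. */
static int
app_setup_zlib_dev(uint8_t dev_id)
{
	struct rte_compressdev_config cfg = {
		.socket_id = rte_compressdev_socket_id(dev_id),
		.nb_queue_pairs = 1,
		/* together these size the per-device "stream_mp_<dev_id>"
		 * mempool created in zlib_pmd_config()
		 */
		.max_nb_priv_xforms = 16,
		.max_nb_streams = 0,
	};
	int ret;

	/* dev_ops->dev_configure -> zlib_pmd_config() */
	ret = rte_compressdev_configure(dev_id, &cfg);
	if (ret < 0)
		return ret;

	/* dev_ops->queue_pair_setup -> zlib_pmd_qp_setup(): allocates the
	 * qp and its processed_pkts ring (RING_F_EXACT_SZ, 64 ops deep here)
	 */
	ret = rte_compressdev_queue_pair_setup(dev_id, 0, 64, cfg.socket_id);
	if (ret < 0)
		return ret;

	/* dev_ops->dev_start -> zlib_pmd_start(), currently a no-op */
	return rte_compressdev_start(dev_id);
}

The calls land in this PMD because zlib_create() (patched below) sets
dev->dev_ops to rte_zlib_pmd_ops.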

Signed-off-by: Sunila Sahu <sunila.sahu@caviumnetworks.com>
Signed-off-by: Shally Verma <shally.verma@caviumnetworks.com>
Signed-off-by: Ashish Gupta <ashish.gupta@caviumnetworks.com>
---
 drivers/compress/zlib/Makefile           |   1 +
 drivers/compress/zlib/meson.build        |   2 +-
 drivers/compress/zlib/zlib_pmd.c         |   2 +
 drivers/compress/zlib/zlib_pmd_ops.c     | 238 +++++++++++++++++++++++++++++++
 drivers/compress/zlib/zlib_pmd_private.h |  35 +++++
 5 files changed, 277 insertions(+), 1 deletion(-)

diff --git a/drivers/compress/zlib/Makefile b/drivers/compress/zlib/Makefile
index bd322c9..5cf8de6 100644
--- a/drivers/compress/zlib/Makefile
+++ b/drivers/compress/zlib/Makefile
@@ -24,5 +24,6 @@ LDLIBS += -lrte_bus_vdev
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd_ops.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/zlib/meson.build b/drivers/compress/zlib/meson.build
index 3f0a77b..7748de2 100644
--- a/drivers/compress/zlib/meson.build
+++ b/drivers/compress/zlib/meson.build
@@ -7,7 +7,7 @@ if not dep.found()
 endif
 
 deps += 'bus_vdev'
-sources = files('zlib_pmd.c')
+sources = files('zlib_pmd.c', 'zlib_pmd_ops.c')
 ext_deps += dep
 pkgconfig_extra_libs += '-lz'
 
diff --git a/drivers/compress/zlib/zlib_pmd.c b/drivers/compress/zlib/zlib_pmd.c
index 39d3628..9363808 100644
--- a/drivers/compress/zlib/zlib_pmd.c
+++ b/drivers/compress/zlib/zlib_pmd.c
@@ -21,6 +21,8 @@ zlib_create(const char *name,
 		return -ENODEV;
 	}
 
+	dev->dev_ops = rte_zlib_pmd_ops;
+
 	return 0;
 }
 
diff --git a/drivers/compress/zlib/zlib_pmd_ops.c b/drivers/compress/zlib/zlib_pmd_ops.c
new file mode 100644
index 0000000..645c5b1
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_ops.c
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "zlib_pmd_private.h"
+
+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = {
+	{   /* Deflate */
+		.algo = RTE_COMP_ALGO_DEFLATE,
+		.comp_feature_flags = (RTE_COMP_FF_NONCOMPRESSED_BLOCKS |
+					RTE_COMP_FF_HUFFMAN_FIXED |
+					RTE_COMP_FF_HUFFMAN_DYNAMIC |
+					RTE_COMP_FF_OOP_SGL_IN_SGL_OUT),
+		.window_size = {
+			.min = 8,
+			.max = 15,
+			.increment = 1
+		},
+	},
+
+	RTE_COMP_END_OF_CAPABILITIES_LIST()
+
+};
+
+/** Configure device */
+static int
+zlib_pmd_config(struct rte_compressdev *dev,
+		struct rte_compressdev_config *config)
+{
+	struct rte_mempool *mp;
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+	struct zlib_private *internals = dev->data->dev_private;
+
+	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+			"stream_mp_%u", dev->data->dev_id);
+	mp = internals->mp;
+	if (mp == NULL) {
+		mp = rte_mempool_create(mp_name,
+				config->max_nb_priv_xforms +
+				config->max_nb_streams,
+				sizeof(struct zlib_priv_xform),
+				0, 0, NULL, NULL, NULL,
+				NULL, config->socket_id,
+				0);
+		if (mp == NULL) {
+			ZLIB_PMD_ERR("Cannot create private xform pool on "
+			"socket %d\n", config->socket_id);
+			return -ENOMEM;
+		}
+		internals->mp = mp;
+	}
+	return 0;
+}
+
+/** Start device */
+static int
+zlib_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+zlib_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+zlib_pmd_close(struct rte_compressdev *dev)
+{
+	struct zlib_private *internals = dev->data->dev_private;
+	rte_mempool_free(internals->mp);
+	internals->mp = NULL;
+	return 0;
+}
+
+/** Get device statistics */
+static void
+zlib_pmd_stats_get(struct rte_compressdev *dev,
+		struct rte_compressdev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+zlib_pmd_stats_reset(struct rte_compressdev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+/** Get device info */
+static void
+zlib_pmd_info_get(struct rte_compressdev *dev,
+		struct rte_compressdev_info *dev_info)
+{
+	if (dev_info != NULL) {
+		dev_info->driver_name = dev->device->name;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = zlib_pmd_capabilities;
+	}
+}
+
+/** Release queue pair */
+static int
+zlib_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+	struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+	if (qp != NULL) {
+		rte_ring_free(qp->processed_pkts);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+zlib_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+		struct zlib_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+				"zlib_pmd_%u_qp_%u",
+				dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+zlib_pmd_qp_create_processed_pkts_ring(struct zlib_qp *qp,
+		unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r = qp->processed_pkts;
+
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			ZLIB_PMD_INFO("Reusing existing ring %s for processed"
+					" packets", qp->name);
+			return r;
+		}
+
+		ZLIB_PMD_ERR("Unable to reuse existing ring %s for processed"
+				" packets", qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+						RING_F_EXACT_SZ);
+}
+
+/** Setup a queue pair */
+static int
+zlib_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+		uint32_t max_inflight_ops, int socket_id)
+{
+	struct zlib_qp *qp = NULL;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		zlib_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("ZLIB PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return (-ENOMEM);
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (zlib_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->processed_pkts = zlib_pmd_qp_create_processed_pkts_ring(qp,
+			max_inflight_ops, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	if (qp) {
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return -1;
+}
+
+struct rte_compressdev_ops zlib_pmd_ops = {
+		.dev_configure		= zlib_pmd_config,
+		.dev_start		= zlib_pmd_start,
+		.dev_stop		= zlib_pmd_stop,
+		.dev_close		= zlib_pmd_close,
+
+		.stats_get		= zlib_pmd_stats_get,
+		.stats_reset		= zlib_pmd_stats_reset,
+
+		.dev_infos_get		= zlib_pmd_info_get,
+
+		.queue_pair_setup	= zlib_pmd_qp_setup,
+		.queue_pair_release	= zlib_pmd_qp_release,
+
+		.private_xform_create	= NULL,
+		.private_xform_free	= NULL,
+
+		.stream_create	= NULL,
+		.stream_free	= NULL
+};
+
+struct rte_compressdev_ops *rte_zlib_pmd_ops = &zlib_pmd_ops;
diff --git a/drivers/compress/zlib/zlib_pmd_private.h b/drivers/compress/zlib/zlib_pmd_private.h
index d26a740..0e391a4 100644
--- a/drivers/compress/zlib/zlib_pmd_private.h
+++ b/drivers/compress/zlib/zlib_pmd_private.h
@@ -27,6 +27,41 @@ int zlib_logtype_driver;
 	ZLIB_PMD_LOG(WARNING, fmt, ## args)
 
 struct zlib_private {
+	struct rte_mempool *mp;
 };
 
+struct zlib_qp {
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+	struct rte_compressdev_stats qp_stats;
+	/**< Queue pair statistics */
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+	/**< Unique Queue Pair Name */
+} __rte_cache_aligned;
+
+/* Algorithm handler function prototype */
+typedef void (*comp_func_t)(struct rte_comp_op *op, z_stream *strm);
+
+typedef int (*comp_free_t)(z_stream *strm);
+
+/** ZLIB Stream structure */
+struct zlib_stream {
+	z_stream strm;
+	/**< zlib stream structure */
+	comp_func_t comp;
+	/**< Operation (compression/decompression) */
+	comp_free_t free;
+	/**< Free Operation (compression/decompression) */
+} __rte_cache_aligned;
+
+/** ZLIB private xform structure */
+struct zlib_priv_xform {
+	struct zlib_stream stream;
+} __rte_cache_aligned;
+
+/** Device specific operations function pointer structure */
+extern struct rte_compressdev_ops *rte_zlib_pmd_ops;
+
 #endif /* _RTE_ZLIB_PMD_PRIVATE_H_ */
-- 
2.9.5

Thread overview: 12+ messages
2018-07-21 18:17 [dpdk-dev] [PATCH v3 0/5] compress: add ZLIB compression PMD Shally Verma
2018-07-21 18:17 ` [dpdk-dev] [PATCH v3 1/5] compress/zlib: add ZLIB PMD Shally Verma
2018-07-21 18:17 ` Shally Verma [this message]
2018-07-21 18:17 ` [dpdk-dev] [PATCH v3 3/5] compress/zlib: create private xform Shally Verma
2018-07-21 18:17 ` [dpdk-dev] [PATCH v3 4/5] compress/zlib: support burst enqueue/dequeue Shally Verma
2018-07-23 12:36   ` De Lara Guarch, Pablo
2018-07-23 12:52     ` Verma, Shally
2018-07-23 13:00       ` De Lara Guarch, Pablo
2018-07-23 16:53   ` Stephen Hemminger
2018-07-23 17:14     ` Verma, Shally
2018-07-23 17:35       ` Stephen Hemminger
2018-07-21 18:17 ` [dpdk-dev] [PATCH v3 5/5] doc: add ZLIB PMD guide Shally Verma
