DPDK patches and discussions
From: Zhangfei Gao <zhangfei.gao@linaro.org>
To: Akhil Goyal <gakhil@marvell.com>,
	Declan Doherty <declan.doherty@intel.com>,
	Fan Zhang <roy.fan.zhang@intel.com>,
	Ashish Gupta <ashish.gupta@marvell.com>,
	Ray Kinsella <mdr@ashroe.eu>
Cc: dev@dpdk.org, acc@openeuler.org, Zhangfei Gao <zhangfei.gao@linaro.org>
Subject: [PATCH resend v3 2/6] crypto/uadk: support basic operations
Date: Sat,  8 Oct 2022 16:37:43 +0800
Message-ID: <20221008083747.6559-3-zhangfei.gao@linaro.org>
In-Reply-To: <20221008083747.6559-1-zhangfei.gao@linaro.org>

Support the basic device control operations: configure, start, stop,
close, get info and statistics, as well as queue pair setup and release.
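
For reference, the sketch below shows how an application reaches these
callbacks through the generic cryptodev API. It is illustrative only:
the helper name, device id, queue depth and error handling are
assumptions, and session mempool setup is omitted.

  #include <rte_cryptodev.h>
  #include <rte_lcore.h>

  /* Hypothetical helper, not part of this patch */
  static int
  exercise_uadk_dev_ops(uint8_t dev_id)
  {
          struct rte_cryptodev_info info;
          struct rte_cryptodev_config conf = {
                  .socket_id = rte_socket_id(),
                  .nb_queue_pairs = 1,
          };
          struct rte_cryptodev_qp_conf qp_conf = {
                  .nb_descriptors = 1024,         /* assumed queue depth */
          };

          rte_cryptodev_info_get(dev_id, &info);          /* .dev_infos_get */

          if (rte_cryptodev_configure(dev_id, &conf) < 0) /* .dev_configure */
                  return -1;

          /* .queue_pair_setup: allocates the uadk_qp and its ring */
          if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
                                             rte_socket_id()) < 0)
                  return -1;

          if (rte_cryptodev_start(dev_id) < 0)            /* .dev_start */
                  return -1;

          /* Enqueue/dequeue support is added by a later patch in this series */

          rte_cryptodev_stop(dev_id);                     /* .dev_stop */
          return rte_cryptodev_close(dev_id);             /* .dev_close */
  }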

Signed-off-by: Zhangfei Gao <zhangfei.gao@linaro.org>
---
 drivers/crypto/uadk/uadk_crypto_pmd.c | 213 ++++++++++++++++++++++++--
 1 file changed, 204 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/uadk/uadk_crypto_pmd.c b/drivers/crypto/uadk/uadk_crypto_pmd.c
index ec9bb174c7..1d1a4b2897 100644
--- a/drivers/crypto/uadk/uadk_crypto_pmd.c
+++ b/drivers/crypto/uadk/uadk_crypto_pmd.c
@@ -12,6 +12,25 @@
 #include <uadk/wd_digest.h>
 #include <uadk/wd_sched.h>
 
+/* Maximum length for digest (SHA-512 needs 64 bytes) */
+#define DIGEST_LENGTH_MAX 64
+
+struct uadk_qp {
+	/* Ring for placing processed packets */
+	struct rte_ring *processed_pkts;
+	/* Queue pair statistics */
+	struct rte_cryptodev_stats qp_stats;
+	/* Queue Pair Identifier */
+	uint16_t id;
+	/* Unique Queue Pair Name */
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	/* Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
+	uint8_t temp_digest[DIGEST_LENGTH_MAX];
+} __rte_cache_aligned;
+
 enum uadk_crypto_version {
 	UADK_CRYPTO_V2,
 	UADK_CRYPTO_V3,
@@ -30,16 +49,192 @@ RTE_LOG_REGISTER_DEFAULT(uadk_crypto_logtype, INFO);
 		"%s() line %u: " fmt "\n", __func__, __LINE__,  \
 		## __VA_ARGS__)
 
+static const struct rte_cryptodev_capabilities uadk_crypto_v2_capabilities[] = {
+	/* End of capabilities */
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/* Configure device */
+static int
+uadk_crypto_pmd_config(struct rte_cryptodev *dev __rte_unused,
+		       struct rte_cryptodev_config *config __rte_unused)
+{
+	return 0;
+}
+
+/* Start device */
+static int
+uadk_crypto_pmd_start(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+/* Stop device */
+static void
+uadk_crypto_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+}
+
+/* Close device */
+static int
+uadk_crypto_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+/* Get device statistics */
+static void
+uadk_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+			  struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct uadk_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/* Reset device statistics */
+static void
+uadk_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct uadk_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+/* Get device info */
+static void
+uadk_crypto_pmd_info_get(struct rte_cryptodev *dev,
+			 struct rte_cryptodev_info *dev_info)
+{
+	struct uadk_crypto_priv *priv = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->driver_name = dev->device->driver->name;
+		dev_info->max_nb_queue_pairs = 128;
+		/* No limit of number of sessions */
+		dev_info->sym.max_nb_sessions = 0;
+		dev_info->feature_flags = dev->feature_flags;
+
+		if (priv->version == UADK_CRYPTO_V2)
+			dev_info->capabilities = uadk_crypto_v2_capabilities;
+	}
+}
+
+/* Release queue pair */
+static int
+uadk_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct uadk_qp *qp = dev->data->queue_pairs[qp_id];
+
+	if (qp) {
+		rte_ring_free(qp->processed_pkts);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+
+	return 0;
+}
+
+/* Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+uadk_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+			    struct uadk_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+				  "uadk_crypto_pmd_%u_qp_%u",
+				  dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Create a ring to place processed packets on */
+static struct rte_ring *
+uadk_pmd_qp_create_processed_pkts_ring(struct uadk_qp *qp,
+				       unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r = qp->processed_pkts;
+
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			UADK_LOG(INFO, "Reusing existing ring %s for processed packets",
+				 qp->name);
+			return r;
+		}
+
+		UADK_LOG(ERR, "Unable to reuse existing ring %s for processed packets",
+			 qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			       RING_F_EXACT_SZ);
+}
+
+static int
+uadk_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+			 const struct rte_cryptodev_qp_conf *qp_conf,
+			 int socket_id)
+{
+	struct uadk_qp *qp;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		uadk_crypto_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("uadk PMD Queue Pair", sizeof(*qp),
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return (-ENOMEM);
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (uadk_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->processed_pkts = uadk_pmd_qp_create_processed_pkts_ring(qp,
+				qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp) {
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return -EINVAL;
+}
+
 static struct rte_cryptodev_ops uadk_crypto_pmd_ops = {
-		.dev_configure		= NULL,
-		.dev_start		= NULL,
-		.dev_stop		= NULL,
-		.dev_close		= NULL,
-		.stats_get		= NULL,
-		.stats_reset		= NULL,
-		.dev_infos_get		= NULL,
-		.queue_pair_setup	= NULL,
-		.queue_pair_release	= NULL,
+		.dev_configure		= uadk_crypto_pmd_config,
+		.dev_start		= uadk_crypto_pmd_start,
+		.dev_stop		= uadk_crypto_pmd_stop,
+		.dev_close		= uadk_crypto_pmd_close,
+		.stats_get		= uadk_crypto_pmd_stats_get,
+		.stats_reset		= uadk_crypto_pmd_stats_reset,
+		.dev_infos_get		= uadk_crypto_pmd_info_get,
+		.queue_pair_setup	= uadk_crypto_pmd_qp_setup,
+		.queue_pair_release	= uadk_crypto_pmd_qp_release,
 		.sym_session_get_size	= NULL,
 		.sym_session_configure	= NULL,
 		.sym_session_clear	= NULL,
-- 
2.36.1


Thread overview: 15+ messages
2022-10-08  8:37 [PATCH resend v3 0/6] crypto/uadk: introduce uadk crypto driver Zhangfei Gao
2022-10-08  8:37 ` [PATCH resend v3 1/6] " Zhangfei Gao
2022-10-08  8:37 ` Zhangfei Gao [this message]
2022-10-08  8:37 ` [PATCH resend v3 3/6] crypto/uadk: support enqueue/dequeue operations Zhangfei Gao
2022-10-08  8:37 ` [PATCH resend v3 4/6] crypto/uadk: support cipher algorithms Zhangfei Gao
2022-10-08  8:37 ` [PATCH resend v3 5/6] crypto/uadk: support auth algorithms Zhangfei Gao
2022-10-08  8:37 ` [PATCH resend v3 6/6] test/crypto: add cryptodev_uadk_autotest Zhangfei Gao
2022-10-10  7:55 ` [EXT] [PATCH resend v3 0/6] crypto/uadk: introduce uadk crypto driver Akhil Goyal
2022-10-10  9:30   ` Zhangfei Gao
2022-10-10  9:57     ` Akhil Goyal
2022-10-11  9:22       ` Akhil Goyal
2022-10-11  9:58         ` Zhangfei Gao
2022-10-14  3:03         ` Zhangfei Gao
2022-10-14 11:28           ` Akhil Goyal
2022-10-14 12:02             ` Akhil Goyal
