From: Shijith Thotton <sthotton@marvell.com>
To: <jerinj@marvell.com>
Cc: Shijith Thotton <sthotton@marvell.com>, <dev@dpdk.org>,
	Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [PATCH] event/cnxk: move crypto adapter ops to respective files
Date: Wed, 27 Jul 2022 12:45:36 +0530
Message-ID: <3a3a26b81355825c5571f45d4c24a298b2b119d5.1658905483.git.sthotton@marvell.com>

Moved the common crypto adapter ops to the file specific to eventdev
adapters.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
 drivers/event/cnxk/cnxk_eventdev.c       | 121 -----------------------
 drivers/event/cnxk/cnxk_eventdev.h       |  10 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 115 +++++++++++++++++++++
 3 files changed, 118 insertions(+), 128 deletions(-)
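
Note (illustrative, not part of the commit): the moved functions back the
eventdev crypto adapter queue pair add/del driver ops. For context, a
minimal sketch of the application-level path that ends up in
cnxk_crypto_adapter_qp_add(); the adapter id, device ids and port config
below are assumptions for the example, not taken from this patch:

  #include <rte_event_crypto_adapter.h>

  /* Assumes event dev 0 and crypto dev 0 are already configured and started. */
  static int
  attach_crypto_qps(void)
  {
  	struct rte_event_port_conf port_conf = {
  		.new_event_threshold = 4096,
  		.dequeue_depth = 8,
  		.enqueue_depth = 8,
  	};
  	int ret;

  	ret = rte_event_crypto_adapter_create(0 /* adapter id */,
  					      0 /* event dev id */, &port_conf,
  					      RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
  	if (ret)
  		return ret;

  	/* A queue_pair_id of -1 binds every queue pair of the crypto dev,
  	 * matching the -1 loop in cnxk_crypto_adapter_qp_add(). */
  	return rte_event_crypto_adapter_queue_pair_add(0 /* adapter id */,
  						       0 /* crypto dev id */,
  						       -1, NULL);
  }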

diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 97dcf7b66e..b7b93778c6 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -2,129 +2,8 @@
  * Copyright(C) 2021 Marvell.
  */
 
-#include "cnxk_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 
-static int
-crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
-			struct cnxk_cpt_qp *qp)
-{
-	char name[RTE_MEMPOOL_NAMESIZE];
-	uint32_t cache_size, nb_req;
-	unsigned int req_size;
-	uint32_t nb_desc_min;
-
-	/*
-	 * Update CPT FC threshold. Decrement by hardware burst size to allow
-	 * simultaneous enqueue from all available cores.
-	 */
-	if (roc_model_is_cn10k())
-		nb_desc_min = rte_lcore_count() * 32;
-	else
-		nb_desc_min = rte_lcore_count() * 2;
-
-	if (qp->lmtline.fc_thresh < nb_desc_min) {
-		plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
-			rte_lcore_count());
-		return -ENOSPC;
-	}
-
-	qp->lmtline.fc_thresh -= nb_desc_min;
-
-	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
-		 cdev->data->dev_id, qp->lf.lf_id);
-	req_size = sizeof(struct cpt_inflight_req);
-	cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
-	nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
-	qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
-					   0, NULL, NULL, NULL, NULL,
-					   rte_socket_id(), 0);
-	if (qp->ca.req_mp == NULL)
-		return -ENOMEM;
-
-	qp->ca.enabled = true;
-
-	return 0;
-}
-
-int
-cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
-			   const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id)
-{
-	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
-	uint32_t adptr_xae_cnt = 0;
-	struct cnxk_cpt_qp *qp;
-	int ret;
-
-	if (queue_pair_id == -1) {
-		uint16_t qp_id;
-
-		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
-			qp = cdev->data->queue_pairs[qp_id];
-			ret = crypto_adapter_qp_setup(cdev, qp);
-			if (ret) {
-				cnxk_crypto_adapter_qp_del(cdev, -1);
-				return ret;
-			}
-			adptr_xae_cnt += qp->ca.req_mp->size;
-		}
-	} else {
-		qp = cdev->data->queue_pairs[queue_pair_id];
-		ret = crypto_adapter_qp_setup(cdev, qp);
-		if (ret)
-			return ret;
-		adptr_xae_cnt = qp->ca.req_mp->size;
-	}
-
-	/* Update crypto adapter XAE count */
-	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
-	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
-
-	return 0;
-}
-
-static int
-crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
-{
-	int ret;
-
-	rte_mempool_free(qp->ca.req_mp);
-	qp->ca.enabled = false;
-
-	ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
-	if (ret < 0) {
-		plt_err("Could not reset lmtline for queue pair %d",
-			qp->lf.lf_id);
-		return ret;
-	}
-
-	return 0;
-}
-
-int
-cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id)
-{
-	struct cnxk_cpt_qp *qp;
-
-	if (queue_pair_id == -1) {
-		uint16_t qp_id;
-
-		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
-			qp = cdev->data->queue_pairs[qp_id];
-			if (qp->ca.enabled)
-				crypto_adapter_qp_free(qp);
-		}
-	} else {
-		qp = cdev->data->queue_pairs[queue_pair_id];
-		if (qp->ca.enabled)
-			crypto_adapter_qp_free(qp);
-	}
-
-	return 0;
-}
-
 void
 cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 		  struct rte_event_dev_info *dev_info)
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index bfd0c5627e..c9a0686b4d 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -287,13 +287,6 @@ int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,
 			  int16_t queue_port_id, const uint32_t ids[],
 			  uint32_t n);
 
-/* Crypto adapter APIs. */
-int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
-			       const struct rte_cryptodev *cdev,
-			       int32_t queue_pair_id);
-int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
-			       int32_t queue_pair_id);
-
 /* CN9K */
 void cn9k_sso_set_rsrc(void *arg);
 
@@ -318,5 +311,8 @@ int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
 int cnxk_sso_tx_adapter_start(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_stop(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_free(uint8_t id, const struct rte_eventdev *event_dev);
+int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
+			       const struct rte_cryptodev *cdev, int32_t queue_pair_id);
+int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 1f2e1b4b5d..3f46e79ba8 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -2,6 +2,7 @@
  * Copyright(C) 2021 Marvell.
  */
 
+#include "cnxk_cryptodev_ops.h"
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 
@@ -628,3 +629,117 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused,
 
 	return 0;
 }
+
+static int
+crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp)
+{
+	char name[RTE_MEMPOOL_NAMESIZE];
+	uint32_t cache_size, nb_req;
+	unsigned int req_size;
+	uint32_t nb_desc_min;
+
+	/*
+	 * Update CPT FC threshold. Decrement by hardware burst size to allow
+	 * simultaneous enqueue from all available cores.
+	 */
+	if (roc_model_is_cn10k())
+		nb_desc_min = rte_lcore_count() * 32;
+	else
+		nb_desc_min = rte_lcore_count() * 2;
+
+	if (qp->lmtline.fc_thresh < nb_desc_min) {
+		plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
+			rte_lcore_count());
+		return -ENOSPC;
+	}
+
+	qp->lmtline.fc_thresh -= nb_desc_min;
+
+	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u", cdev->data->dev_id, qp->lf.lf_id);
+	req_size = sizeof(struct cpt_inflight_req);
+	cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
+	nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
+	qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0, NULL, NULL, NULL,
+					   NULL, rte_socket_id(), 0);
+	if (qp->ca.req_mp == NULL)
+		return -ENOMEM;
+
+	qp->ca.enabled = true;
+
+	return 0;
+}
+
+int
+cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
+			   int32_t queue_pair_id)
+{
+	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
+	uint32_t adptr_xae_cnt = 0;
+	struct cnxk_cpt_qp *qp;
+	int ret;
+
+	if (queue_pair_id == -1) {
+		uint16_t qp_id;
+
+		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+			qp = cdev->data->queue_pairs[qp_id];
+			ret = crypto_adapter_qp_setup(cdev, qp);
+			if (ret) {
+				cnxk_crypto_adapter_qp_del(cdev, -1);
+				return ret;
+			}
+			adptr_xae_cnt += qp->ca.req_mp->size;
+		}
+	} else {
+		qp = cdev->data->queue_pairs[queue_pair_id];
+		ret = crypto_adapter_qp_setup(cdev, qp);
+		if (ret)
+			return ret;
+		adptr_xae_cnt = qp->ca.req_mp->size;
+	}
+
+	/* Update crypto adapter XAE count */
+	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
+	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
+{
+	int ret;
+
+	rte_mempool_free(qp->ca.req_mp);
+	qp->ca.enabled = false;
+
+	ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
+	if (ret < 0) {
+		plt_err("Could not reset lmtline for queue pair %d", qp->lf.lf_id);
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id)
+{
+	struct cnxk_cpt_qp *qp;
+
+	if (queue_pair_id == -1) {
+		uint16_t qp_id;
+
+		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+			qp = cdev->data->queue_pairs[qp_id];
+			if (qp->ca.enabled)
+				crypto_adapter_qp_free(qp);
+		}
+	} else {
+		qp = cdev->data->queue_pairs[queue_pair_id];
+		if (qp->ca.enabled)
+			crypto_adapter_qp_free(qp);
+	}
+
+	return 0;
+}
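
Note (illustrative, outside the patch): a standalone sanity check of the
sizing math in crypto_adapter_qp_setup(), using assumed example values
(qp->lf.nb_desc = 2048, 8 lcores, cn10k burst of 32). The pool is grown
to cache_size * lcore_count so the per-lcore mempool caches cannot
starve the queue of inflight-request objects:

  #include <stdint.h>
  #include <stdio.h>

  #define CACHE_MAX 512 /* stands in for RTE_MEMPOOL_CACHE_MAX_SIZE */

  int main(void)
  {
  	uint32_t nb_desc = 2048, lcores = 8;
  	uint32_t fc_decrement, cache_size, nb_req;

  	fc_decrement = lcores * 32;             /* 256 held back from fc_thresh */
  	cache_size = (uint32_t)(nb_desc / 1.5); /* 1365 */
  	if (cache_size > CACHE_MAX)
  		cache_size = CACHE_MAX;         /* capped at 512 */
  	nb_req = cache_size * lcores;           /* 4096 */
  	if (nb_req < nb_desc)
  		nb_req = nb_desc;               /* pool size = max(...) = 4096 */

  	printf("fc decrement=%u cache=%u pool=%u\n",
  	       fc_decrement, cache_size, nb_req);
  	return 0;
  }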
-- 
2.25.1


Thread overview: 2+ messages
2022-07-27  7:15 Shijith Thotton [this message]
2022-08-19  7:07 ` Jerin Jacob
