From: Ciara Power <ciara.power@intel.com>
To: dev@dpdk.org
Cc: kai.ji@intel.com, gakhil@marvell.com,
	Marcel Cornu <marcel.d.cornu@intel.com>,
	Pablo de Lara <pablo.de.lara.guarch@intel.com>,
	Ciara Power <ciara.power@intel.com>
Subject: [PATCH v2 2/8] crypto/ipsec_mb: use burst API in aesni_mb
Date: Tue, 16 May 2023 15:24:16 +0000
Message-ID: <20230516152422.606617-3-ciara.power@intel.com>
In-Reply-To: <20230516152422.606617-1-ciara.power@intel.com>

From: Marcel Cornu <marcel.d.cornu@intel.com>

Use the new ipsec_mb burst API in the dequeue burst function
when the ipsec_mb version is v1.3 or newer.

Signed-off-by: Marcel Cornu <marcel.d.cornu@intel.com>
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>
---
v2: moved some functions inside the ifdef block, as they are only
    used when the IPSec_MB version is 1.2 or lower.
---
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 202 ++++++++++++++++++++-----
 1 file changed, 167 insertions(+), 35 deletions(-)
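
Note for reviewers: below is a condensed sketch of the burst flow that
this patch adds, to show its shape in one place. fill_one_job() and
post_process() are hypothetical stand-ins for the PMD's
set_mb_job_params() and post_process_mb_job(); the ingress ring,
per-op error handling and the DOCSIS path are omitted, so this is a
shape reference under those assumptions, not a drop-in implementation.

#include <intel-ipsec-mb.h>

/* Hypothetical stand-ins for the PMD helpers (see note above). */
static int fill_one_job(IMB_JOB *job);
static int post_process(IMB_JOB *job);

static uint16_t
burst_flow_sketch(IMB_MGR *mb_mgr, uint16_t nb_ops)
{
	IMB_JOB *jobs[IMB_MAX_BURST_SIZE];
	uint16_t i, nb_jobs, done = 0;
	uint16_t n = (nb_ops > IMB_MAX_BURST_SIZE) ?
		IMB_MAX_BURST_SIZE : nb_ops;

	/* 1. Reserve n job slots; if the manager is full, flush
	 *    completed jobs out and post-process them first. */
	while (IMB_GET_NEXT_BURST(mb_mgr, n, jobs) < n) {
		nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs);
		for (i = 0; i < nb_jobs; i++)
			done += post_process(jobs[i]);
	}

	/* 2. Fill each reserved job from a dequeued crypto op. */
	for (i = 0; i < n; i++)
		fill_one_job(jobs[i]);

	/* 3. One submit call replaces n IMB_SUBMIT_JOB() calls. */
	nb_jobs = IMB_SUBMIT_BURST(mb_mgr, n, jobs);

	/* 4. Post-process whatever completed in-line. */
	for (i = 0; i < nb_jobs; i++)
		done += post_process(jobs[i]);

	return done;
}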

diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index c53548aa3b..b22c0183eb 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -9,6 +9,10 @@ struct aesni_mb_op_buf_data {
 	uint32_t offset;
 };
 
+#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
+static IMB_JOB *jobs[IMB_MAX_BURST_SIZE] = {NULL};
+#endif
+
 /**
  * Calculate the authentication pre-computes
  *
@@ -1884,6 +1888,168 @@ post_process_mb_sync_job(IMB_JOB *job)
 	st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG;
 }
 
+static inline uint32_t
+handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr)
+{
+	uint32_t i;
+
+	for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
+		post_process_mb_sync_job(job);
+
+	return i;
+}
+
+static inline uint32_t
+flush_mb_sync_mgr(IMB_MGR *mb_mgr)
+{
+	IMB_JOB *job;
+
+	job = IMB_FLUSH_JOB(mb_mgr);
+	return handle_completed_sync_jobs(job, mb_mgr);
+}
+
+static inline IMB_JOB *
+set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
+{
+	job->chain_order = IMB_ORDER_HASH_CIPHER;
+	job->cipher_mode = IMB_CIPHER_NULL;
+	job->hash_alg = IMB_AUTH_NULL;
+	job->cipher_direction = IMB_DIR_DECRYPT;
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return job;
+}
+
+#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
+static uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct ipsec_mb_qp *qp = queue_pair;
+	IMB_MGR *mb_mgr = qp->mb_mgr;
+	struct rte_crypto_op *op;
+	struct rte_crypto_op *deqd_ops[IMB_MAX_BURST_SIZE];
+	IMB_JOB *job;
+	int retval, processed_jobs = 0;
+	uint16_t i, nb_jobs;
+
+	if (unlikely(nb_ops == 0 || mb_mgr == NULL))
+		return 0;
+
+	uint8_t digest_idx = qp->digest_idx;
+	uint16_t burst_sz = (nb_ops > IMB_MAX_BURST_SIZE) ?
+		IMB_MAX_BURST_SIZE : nb_ops;
+
+	/*
+	 * If nb_ops is greater than the max supported
+	 * ipsec_mb burst size, then process in bursts of
+	 * IMB_MAX_BURST_SIZE until all operations are submitted
+	 */
+	while (nb_ops) {
+		uint16_t nb_submit_ops;
+		uint16_t n = (nb_ops / burst_sz) ?
+			burst_sz : nb_ops;
+
+		while (unlikely((IMB_GET_NEXT_BURST(mb_mgr, n, jobs)) < n)) {
+			/*
+			 * Not enough free jobs in the queue.
+			 * Flush n jobs until enough jobs are available.
+			 */
+			nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs);
+			for (i = 0; i < nb_jobs; i++) {
+				job = jobs[i];
+
+				op = post_process_mb_job(qp, job);
+				if (op) {
+					ops[processed_jobs++] = op;
+					qp->stats.dequeued_count++;
+				} else {
+					qp->stats.dequeue_err_count++;
+					break;
+				}
+			}
+		}
+
+		/*
+		 * Get the next operations to process from ingress queue.
+		 * There is no need to return the job to the IMB_MGR
+		 * if there are no more operations to process, since
+		 * the IMB_MGR can use that pointer again in next
+		 * get_next calls.
+		 */
+		nb_submit_ops = rte_ring_dequeue_burst(qp->ingress_queue,
+						(void **)deqd_ops, n, NULL);
+		for (i = 0; i < nb_submit_ops; i++) {
+			job = jobs[i];
+			op = deqd_ops[i];
+
+#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
+			if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+				retval = set_sec_mb_job_params(job, qp, op,
+							       &digest_idx);
+			else
+#endif
+				retval = set_mb_job_params(job, qp, op,
+							   &digest_idx, mb_mgr);
+
+			if (unlikely(retval != 0)) {
+				qp->stats.dequeue_err_count++;
+				set_job_null_op(job, op);
+			}
+		}
+
+		/* Submit jobs to multi-buffer for processing */
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		int err = 0;
+
+		nb_jobs = IMB_SUBMIT_BURST(mb_mgr, nb_submit_ops, jobs);
+		err = imb_get_errno(mb_mgr);
+		if (err)
+			IPSEC_MB_LOG(ERR, "%s", imb_get_strerror(err));
+#else
+		nb_jobs = IMB_SUBMIT_BURST_NOCHECK(mb_mgr,
+						   nb_submit_ops, jobs);
+#endif
+		for (i = 0; i < nb_jobs; i++) {
+			job = jobs[i];
+
+			op = post_process_mb_job(qp, job);
+			if (op) {
+				ops[processed_jobs++] = op;
+				qp->stats.dequeued_count++;
+			} else {
+				qp->stats.dequeue_err_count++;
+				break;
+			}
+		}
+
+		qp->digest_idx = digest_idx;
+
+		if (processed_jobs < 1) {
+			nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs);
+
+			for (i = 0; i < nb_jobs; i++) {
+				job = jobs[i];
+
+				op = post_process_mb_job(qp, job);
+				if (op) {
+					ops[processed_jobs++] = op;
+					qp->stats.dequeued_count++;
+				} else {
+					qp->stats.dequeue_err_count++;
+					break;
+				}
+			}
+		}
+		nb_ops -= n;
+	}
+
+	return processed_jobs;
+}
+#else
+
 /**
  * Process a completed IMB_JOB job and keep processing jobs until
  * get_completed_job return NULL
@@ -1924,26 +2090,6 @@ handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
 	return processed_jobs;
 }
 
-static inline uint32_t
-handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr)
-{
-	uint32_t i;
-
-	for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
-		post_process_mb_sync_job(job);
-
-	return i;
-}
-
-static inline uint32_t
-flush_mb_sync_mgr(IMB_MGR *mb_mgr)
-{
-	IMB_JOB *job;
-
-	job = IMB_FLUSH_JOB(mb_mgr);
-	return handle_completed_sync_jobs(job, mb_mgr);
-}
-
 static inline uint16_t
 flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
@@ -1960,20 +2106,6 @@ flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
 	return processed_ops;
 }
 
-static inline IMB_JOB *
-set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
-{
-	job->chain_order = IMB_ORDER_HASH_CIPHER;
-	job->cipher_mode = IMB_CIPHER_NULL;
-	job->hash_alg = IMB_AUTH_NULL;
-	job->cipher_direction = IMB_DIR_DECRYPT;
-
-	/* Set user data to be crypto operation data struct */
-	job->user_data = op;
-
-	return job;
-}
-
 static uint16_t
 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
@@ -2054,7 +2186,7 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 
 	return processed_jobs;
 }
-
+#endif
 static inline int
 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
 {
-- 
2.25.1
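
For context, this PMD-level dequeue function is what services the
public cryptodev dequeue call. A minimal polling sketch follows,
assuming dev_id/qp_id were configured elsewhere and BURST is an
arbitrary application choice; it frees ops regardless of status,
which a real application may not want.

#include <rte_cryptodev.h>
#include <rte_crypto.h>

#define BURST 32

/* Drain up to BURST completed ops from one queue pair; the driver's
 * aesni_mb_dequeue_burst() runs underneath this call. Returns the
 * number of ops that completed successfully. */
static uint16_t
poll_completions(uint8_t dev_id, uint16_t qp_id)
{
	struct rte_crypto_op *ops[BURST];
	uint16_t i, n, ok = 0;

	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, BURST);
	for (i = 0; i < n; i++) {
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_SUCCESS)
			ok++;
		rte_crypto_op_free(ops[i]);
	}
	return ok;
}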

