patches for DPDK stable branches
 help / color / mirror / Atom feed
From: Ciara Power <ciara.power@intel.com>
To: stable@dpdk.org
Cc: ktraynor@redhat.com, Ciara Power <ciara.power@intel.com>,
	John Griffin <john.griffin@intel.com>,
	Fiona Trahe <fiona.trahe@intel.com>,
	Deepak Kumar Jain <deepak.k.jain@intel.com>
Subject: [PATCH 21.11] crypto/qat: fix raw API null algorithm digest
Date: Tue, 21 Nov 2023 16:22:09 +0000	[thread overview]
Message-ID: <20231121162209.3945846-1-ciara.power@intel.com> (raw)

[ upstream commit d7d52b37e89132f07121323c449ac838e6448ae0 ]

QAT HW generates a digest of 0x00 bytes, even when a digest of length 0 is
requested for NULL. This caused test failures when the test vector had
digest length 0, as the buffer had unexpectedly changed bytes.

Placing the digest into the cookie for NULL authentication leaves
the buffer unchanged as expected, while the digest is placed to the
side, as it won't be used anyway.

This fix was previously added for the main QAT code path, but it also
needs to be included for the raw API code path.

Fixes: db0e952a5c01 ("crypto/qat: add NULL capability")

Signed-off-by: Ciara Power <ciara.power@intel.com>
---
 drivers/crypto/qat/qat_sym_hw_dp.c | 42 +++++++++++++++++++++++++++---
 1 file changed, 38 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 792ad2b213..8b505a87e0 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -251,13 +251,17 @@ qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
 	struct qat_qp *qp = qp_data;
 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
 	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
 	struct qat_sym_session *ctx = dp_ctx->session;
 	struct icp_qat_fw_la_bulk_req *req;
 	int32_t data_len;
 	uint32_t tail = dp_ctx->tail;
+	struct rte_crypto_va_iova_ptr null_digest;
+	struct rte_crypto_va_iova_ptr *job_digest = digest;
 
 	req = (struct icp_qat_fw_la_bulk_req *)(
 		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
 	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
@@ -266,7 +270,11 @@ qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
 		return -1;
 	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
 
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
+		null_digest.iova = cookie->digest_null_phys_addr;
+		job_digest = &null_digest;
+	}
+	enqueue_one_auth_job(ctx, req, job_digest, auth_iv, ofs,
 			(uint32_t)data_len);
 
 	dp_ctx->tail = tail;
@@ -283,11 +291,14 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
 	struct qat_qp *qp = qp_data;
 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
 	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
 	struct qat_sym_session *ctx = dp_ctx->session;
 	uint32_t i, n;
 	uint32_t tail;
 	struct icp_qat_fw_la_bulk_req *req;
 	int32_t data_len;
+	struct rte_crypto_va_iova_ptr null_digest;
+	struct rte_crypto_va_iova_ptr *job_digest = NULL;
 
 	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
 	if (unlikely(n == 0)) {
@@ -301,6 +312,7 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
 	for (i = 0; i < n; i++) {
 		req  = (struct icp_qat_fw_la_bulk_req *)(
 			(uint8_t *)tx_queue->base_addr + tail);
+		cookie = qp->op_cookies[tail >> tx_queue->trailz];
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
 		data_len = qat_sym_dp_parse_data_vec(qp, req,
@@ -309,7 +321,12 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
+			null_digest.iova = cookie->digest_null_phys_addr;
+			job_digest = &null_digest;
+		} else
+			job_digest = &vec->digest[i];
+		enqueue_one_auth_job(ctx, req, job_digest,
 			&vec->auth_iv[i], ofs, (uint32_t)data_len);
 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
 	}
@@ -433,23 +450,31 @@ qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
 	struct qat_qp *qp = qp_data;
 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
 	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
 	struct qat_sym_session *ctx = dp_ctx->session;
 	struct icp_qat_fw_la_bulk_req *req;
 	int32_t data_len;
 	uint32_t tail = dp_ctx->tail;
+	struct rte_crypto_va_iova_ptr null_digest;
+	struct rte_crypto_va_iova_ptr *job_digest = digest;
 
 	req = (struct icp_qat_fw_la_bulk_req *)(
 		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
 	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
 	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
 	if (unlikely(data_len < 0))
 		return -1;
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
+		null_digest.iova = cookie->digest_null_phys_addr;
+		job_digest = &null_digest;
+	}
 	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
 
 	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
+			cipher_iv, job_digest, auth_iv, ofs, (uint32_t)data_len)))
 		return -1;
 
 	dp_ctx->tail = tail;
@@ -466,11 +491,14 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
 	struct qat_qp *qp = qp_data;
 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
 	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
 	struct qat_sym_session *ctx = dp_ctx->session;
 	uint32_t i, n;
 	uint32_t tail;
 	struct icp_qat_fw_la_bulk_req *req;
 	int32_t data_len;
+	struct rte_crypto_va_iova_ptr null_digest;
+	struct rte_crypto_va_iova_ptr *job_digest;
 
 	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
 	if (unlikely(n == 0)) {
@@ -484,6 +512,7 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
 	for (i = 0; i < n; i++) {
 		req  = (struct icp_qat_fw_la_bulk_req *)(
 			(uint8_t *)tx_queue->base_addr + tail);
+		cookie = qp->op_cookies[tail >> tx_queue->trailz];
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
 		data_len = qat_sym_dp_parse_data_vec(qp, req,
@@ -491,10 +520,15 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
 			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
+			null_digest.iova = cookie->digest_null_phys_addr;
+			job_digest = &null_digest;
+		} else
+			job_digest = &vec->digest[i];
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
 		if (unlikely(enqueue_one_chain_job(ctx, req,
 			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
+			&vec->iv[i], job_digest,
 			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
 			break;
 
-- 
2.25.1


             reply	other threads:[~2023-11-21 16:22 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-11-21 16:22 Ciara Power [this message]
2023-11-23 10:48 ` Kevin Traynor

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231121162209.3945846-1-ciara.power@intel.com \
    --to=ciara.power@intel.com \
    --cc=deepak.k.jain@intel.com \
    --cc=fiona.trahe@intel.com \
    --cc=john.griffin@intel.com \
    --cc=ktraynor@redhat.com \
    --cc=stable@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).