* [PATCH v2 1/3] crypto/ipsec_mb: improve CPU code path
From: Radu Nicolau @ 2025-09-11 13:14 UTC
To: dev; +Cc: Radu Nicolau, Kai Ji, Pablo de Lara
Improve the CPU crypto code path: add out-of-place (OOP) processing support along with various small fixes, including digest-appended operation handling, a payload copy for NULL-cipher OOP requests, skipping digest handling for NULL-auth sessions, and error logging on job submission in debug builds.
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
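For reviewers, a minimal usage sketch of the now OOP-capable CPU path
(illustrative only, not part of this patch — the helper name and the
flat in_buf/out_buf buffers are assumptions):

#include <errno.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* Submit one out-of-place request on the CPU crypto path.
 * dev_id and sess must already be configured; in_buf, out_buf and
 * len are hypothetical flat buffers covering the whole operation. */
static int
cpu_crypto_oop_once(uint8_t dev_id, struct rte_cryptodev_sym_session *sess,
		uint8_t *in_buf, uint8_t *out_buf, uint32_t len,
		void *iv, void *digest)
{
	struct rte_crypto_vec src_vec = { .base = in_buf, .len = len };
	struct rte_crypto_vec dst_vec = { .base = out_buf, .len = len };
	struct rte_crypto_sgl src_sgl = { .vec = &src_vec, .num = 1 };
	struct rte_crypto_sgl dst_sgl = { .vec = &dst_vec, .num = 1 };
	struct rte_crypto_va_iova_ptr iv_ptr = { .va = iv };
	struct rte_crypto_va_iova_ptr dg_ptr = { .va = digest };
	int32_t status = 0;
	/* zero head/tail offsets: cipher and auth cover the full length */
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = &src_sgl,
		.dest_sgl = &dst_sgl,	/* NULL here selects in-place processing */
		.iv = &iv_ptr,
		.digest = &dg_ptr,
		.status = &status,
	};

	if (rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, &vec) != 1)
		return status != 0 ? status : -EIO;
	return 0;
}

With single-segment src/dest SGLs this exercises exactly the new
dest_sgl handling added below.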
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 124 +++++++++++++++++--------
1 file changed, 86 insertions(+), 38 deletions(-)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index a6c3f09b6f..356e6cd4a0 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -975,14 +975,24 @@ aesni_mb_set_docsis_sec_session_parameters(
return ret;
}
+static inline uint8_t *get_src_buf(struct rte_crypto_op *op)
+{
+ return op->sym->m_src ?
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t *) : NULL;
+}
+static inline uint8_t *get_dst_buf(struct rte_crypto_op *op)
+{
+ return op->sym->m_dst ?
+ rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *) : NULL;
+}
+
static inline uint64_t
-auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
+auth_start_offset(uint8_t *p_src, uint8_t *p_dst,
+ struct aesni_mb_session *session,
uint32_t oop, const uint32_t auth_offset,
const uint32_t cipher_offset, const uint32_t auth_length,
const uint32_t cipher_length, uint8_t lb_sgl)
{
- struct rte_mbuf *m_src, *m_dst;
- uint8_t *p_src, *p_dst;
uintptr_t u_src, u_dst;
uint32_t cipher_end, auth_end;
@@ -990,11 +1000,6 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
if (!oop || session->template_job.chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl)
return auth_offset;
- m_src = op->sym->m_src;
- m_dst = op->sym->m_dst;
-
- p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
- p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
u_src = (uintptr_t)p_src;
u_dst = (uintptr_t)p_dst + auth_offset;
@@ -1029,9 +1034,11 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
static inline void
set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
- union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
- struct rte_crypto_va_iova_ptr *iv,
- struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
+ union rte_crypto_sym_ofs sofs, void *src_buf, void *dst_buf,
+ uint32_t len, struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *aad,
+ void **digest, struct rte_crypto_va_iova_ptr *vec_digest,
+ void *udata)
{
memcpy(job, &session->template_job, sizeof(IMB_JOB));
@@ -1065,10 +1072,7 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
#endif
default:
- job->u.HMAC._hashed_auth_key_xor_ipad =
- session->auth.pads.inner;
- job->u.HMAC._hashed_auth_key_xor_opad =
- session->auth.pads.outer;
+ break;
}
/*
@@ -1076,20 +1080,48 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
* digest length as specified in the relevant IPsec RFCs
*/
- /* Set digest location and length */
- job->auth_tag_output = digest;
-
/* Data Parameters */
- job->src = buf;
- job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
+ job->src = src_buf;
+ if (dst_buf)
+ job->dst = (uint8_t *)dst_buf + sofs.ofs.cipher.head;
+ else
+ job->dst = (uint8_t *)src_buf + sofs.ofs.cipher.head;
job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
- job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
sofs.ofs.auth.tail;
job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
sofs.ofs.cipher.tail;
+ job->hash_start_src_offset_in_bytes = auth_start_offset(
+ src_buf, dst_buf,
+ session, dst_buf != NULL,
+ sofs.ofs.auth.head,
+ sofs.ofs.cipher.head,
+ job->msg_len_to_hash_in_bytes,
+ job->msg_len_to_cipher_in_bytes, 0);
job->user_data = udata;
+
+ /* Check if conditions are met for digest-appended operations */
+ if (job->cipher_mode != IMB_CIPHER_NULL && job->chain_order == IMB_ORDER_HASH_CIPHER) {
+ uintptr_t end_cipher, start_cipher;
+ if (dst_buf == NULL) {
+ end_cipher = (uintptr_t)src_buf + job->msg_len_to_cipher_in_bytes;
+ start_cipher = (uintptr_t)src_buf;
+ } else {
+ end_cipher = (uintptr_t)dst_buf + job->msg_len_to_cipher_in_bytes;
+ start_cipher = (uintptr_t)dst_buf;
+ }
+ if (start_cipher < (uintptr_t)vec_digest->va &&
+ (uintptr_t)vec_digest->va < end_cipher)
+ *digest = (void *)((uintptr_t)src_buf + job->msg_len_to_hash_in_bytes);
+ }
+ job->auth_tag_output = *digest;
+
+ if (job->cipher_mode == IMB_CIPHER_NULL && dst_buf) {
+ memcpy(job->dst + job->cipher_start_src_offset_in_bytes,
+ job->src + job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes);
+ }
}
static int
@@ -1693,7 +1725,8 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
auth_len_in_bytes = op->sym->auth.data.length >> 3;
ciph_len_in_bytes = op->sym->cipher.data.length >> 3;
- job->hash_start_src_offset_in_bytes = auth_start_offset(op,
+ job->hash_start_src_offset_in_bytes = auth_start_offset(
+ get_src_buf(op), get_dst_buf(op),
session, oop, auth_off_in_bytes,
ciph_off_in_bytes, auth_len_in_bytes,
ciph_len_in_bytes, lb_sgl);
@@ -1710,7 +1743,8 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
auth_len_in_bytes = op->sym->auth.data.length >> 3;
ciph_len_in_bytes = op->sym->cipher.data.length >> 3;
- job->hash_start_src_offset_in_bytes = auth_start_offset(op,
+ job->hash_start_src_offset_in_bytes = auth_start_offset(
+ get_src_buf(op), get_dst_buf(op),
session, oop, auth_off_in_bytes,
ciph_off_in_bytes, auth_len_in_bytes,
ciph_len_in_bytes, lb_sgl);
@@ -1735,7 +1769,8 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
#endif
default:
- job->hash_start_src_offset_in_bytes = auth_start_offset(op,
+ job->hash_start_src_offset_in_bytes = auth_start_offset(
+ get_src_buf(op), get_dst_buf(op),
session, oop, op->sym->auth.data.offset,
op->sym->cipher.data.offset,
op->sym->auth.data.length,
@@ -2261,12 +2296,16 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
}
static inline int
-check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
+check_crypto_sgl(union rte_crypto_sym_ofs so,
+ const struct rte_crypto_sym_vec *vec, uint32_t idx)
{
+ const struct rte_crypto_sgl *sgl_src = vec->src_sgl + idx;
+ const struct rte_crypto_sgl *sgl_dst = vec->dest_sgl ?
+ vec->dest_sgl + idx : NULL;
/* no multi-seg support with current AESNI-MB PMD */
- if (sgl->num != 1)
+ if (sgl_src->num != 1 || (sgl_dst && sgl_dst->num != 1))
return -ENOTSUP;
- else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
+ else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl_src->vec[0].len)
return -EINVAL;
return 0;
}
@@ -2275,7 +2314,11 @@ static inline IMB_JOB *
submit_sync_job(IMB_MGR *mb_mgr)
{
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
- return IMB_SUBMIT_JOB(mb_mgr);
+ IMB_JOB *job = IMB_SUBMIT_JOB(mb_mgr);
+ int err = imb_get_errno(mb_mgr);
+ if (err)
+ IPSEC_MB_LOG(ERR, "%s", imb_get_strerror(err));
+ return job;
#else
return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif
@@ -2283,7 +2326,7 @@ submit_sync_job(IMB_MGR *mb_mgr)
static inline uint32_t
generate_sync_dgst(struct rte_crypto_sym_vec *vec,
- const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
+ uint8_t *dgst[], uint32_t len)
{
uint32_t i, k;
@@ -2299,7 +2342,7 @@ generate_sync_dgst(struct rte_crypto_sym_vec *vec,
static inline uint32_t
verify_sync_dgst(struct rte_crypto_sym_vec *vec,
- const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
+ uint8_t *dgst[], uint32_t len)
{
uint32_t i, k;
@@ -2322,11 +2365,12 @@ aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
{
int32_t ret;
uint32_t i, j, k, len;
- void *buf;
+ void *src_buf, *dst_buf = NULL;
IMB_JOB *job;
IMB_MGR *mb_mgr;
struct aesni_mb_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
+ uint8_t *tmp_dgst_ptr[vec->num];
/* get per-thread MB MGR, create one if needed */
mb_mgr = get_per_thread_mb_mgr();
@@ -2334,13 +2378,15 @@ aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
return 0;
for (i = 0, j = 0, k = 0; i != vec->num; i++) {
- ret = check_crypto_sgl(sofs, vec->src_sgl + i);
+ ret = check_crypto_sgl(sofs, vec, i);
if (ret != 0) {
vec->status[i] = ret;
continue;
}
- buf = vec->src_sgl[i].vec[0].base;
+ src_buf = vec->src_sgl[i].vec[0].base;
+ if (vec->dest_sgl != NULL && vec->dest_sgl[i].vec[0].base != NULL)
+ dst_buf = vec->dest_sgl[i].vec[0].base;
len = vec->src_sgl[i].vec[0].len;
job = IMB_GET_NEXT_JOB(mb_mgr);
@@ -2351,8 +2397,10 @@ aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
}
/* Submit job for processing */
- set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
- &vec->aad[i], tmp_dgst[i], &vec->status[i]);
+ tmp_dgst_ptr[i] = tmp_dgst[i];
+ set_cpu_mb_job_params(job, s, sofs, src_buf, dst_buf, len, &vec->iv[i],
+ &vec->aad[i], (void **)&tmp_dgst_ptr[i], &vec->digest[i],
+ &vec->status[i]);
job = submit_sync_job(mb_mgr);
j++;
@@ -2365,14 +2413,14 @@ aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
k += flush_mb_sync_mgr(mb_mgr);
/* finish processing for successful jobs: check/update digest */
- if (k != 0) {
+ if (k != 0 && s->template_job.hash_alg != IMB_AUTH_NULL) {
if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
k = verify_sync_dgst(vec,
- (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
+ tmp_dgst_ptr,
s->auth.req_digest_len);
else
k = generate_sync_dgst(vec,
- (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
+ tmp_dgst_ptr,
s->auth.req_digest_len);
}
--
2.50.1
* [PATCH v2 2/3] test/crypto: improve CPU mode coverage
From: Radu Nicolau @ 2025-09-11 13:14 UTC
To: dev; +Cc: Radu Nicolau, Akhil Goyal, Fan Zhang
Add a CPU crypto mode (RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) code path to the blockcipher tests, covering both in-place and out-of-place operations.
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
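For reference, a self-contained sketch of the offset computation the
new test path performs (the make_ofs helper name is an assumption;
the arithmetic mirrors the test code below):

#include <rte_common.h>
#include <rte_crypto_sym.h>

/* Build the sym_ofs union from classic offset/length pairs: head is
 * the number of bytes before the processed region, tail the number
 * of bytes after it, both relative to the overall max_len span. */
static union rte_crypto_sym_ofs
make_ofs(uint32_t cipher_ofs, uint32_t cipher_len,
		uint32_t auth_ofs, uint32_t auth_len, uint32_t *max_len)
{
	union rte_crypto_sym_ofs ofs = { .raw = 0 };

	*max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
	ofs.ofs.cipher.head = cipher_ofs;
	ofs.ofs.cipher.tail = *max_len - cipher_ofs - cipher_len;
	ofs.ofs.auth.head = auth_ofs;
	ofs.ofs.auth.tail = *max_len - auth_ofs - auth_len;
	return ofs;
}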
app/test/test_cryptodev.c | 2 +-
app/test/test_cryptodev.h | 2 +
app/test/test_cryptodev_blockcipher.c | 103 +++++++++++++++++++++-----
3 files changed, 89 insertions(+), 18 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 5229ac2bf6..0773d320d4 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -75,7 +75,7 @@
static int gbl_driver_id;
-static enum rte_security_session_action_type gbl_action_type =
+enum rte_security_session_action_type gbl_action_type =
RTE_SECURITY_ACTION_TYPE_NONE;
enum cryptodev_api_test_type global_api_test_type = CRYPTODEV_API_TEST;
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index e243cf945a..23d12ec961 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -5,6 +5,7 @@
#define TEST_CRYPTODEV_H_
#include <rte_cryptodev.h>
+#include <rte_security.h>
#define MAX_NUM_OPS_INFLIGHT (4096)
#define MIN_NUM_OPS_INFLIGHT (128)
@@ -85,6 +86,7 @@ enum cryptodev_api_test_type {
CRYPTODEV_RAW_API_TEST
};
+extern enum rte_security_session_action_type gbl_action_type;
extern enum cryptodev_api_test_type global_api_test_type;
extern struct crypto_testsuite_params *p_testsuite_params;
diff --git a/app/test/test_cryptodev_blockcipher.c b/app/test/test_cryptodev_blockcipher.c
index 5a124bbb5e..6b37347789 100644
--- a/app/test/test_cryptodev_blockcipher.c
+++ b/app/test/test_cryptodev_blockcipher.c
@@ -567,25 +567,95 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
goto error_exit;
}
} else {
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Error sending packet for encryption");
- status = TEST_FAILED;
- goto error_exit;
- }
+ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
+ int32_t n_src, n_dst, st, n;
+ struct rte_crypto_sym_op *sop;
+ union rte_crypto_sym_ofs ofs;
+ struct rte_crypto_sgl sgl_src, sgl_dst;
+ struct rte_crypto_sym_vec symvec = {0};
+ struct rte_crypto_va_iova_ptr iv_ptr, digest_ptr;
+ struct rte_crypto_vec vec_src[UINT8_MAX];
+ struct rte_crypto_vec vec_dst[UINT8_MAX];
+ uint32_t cipher_offset, cipher_len, auth_offset, auth_len, max_len;
+ bool is_oop = op->sym->m_dst != NULL;
+
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS) {
+ status = TEST_SKIPPED;
+ goto error_exit;
+ }
+
+ sop = op->sym;
+ cipher_offset = sop->cipher.data.offset;
+ cipher_len = sop->cipher.data.length;
+ auth_offset = sop->auth.data.offset;
+ auth_len = sop->auth.data.length;
+ max_len = RTE_MAX(cipher_offset + cipher_len, auth_offset + auth_len);
+
+ n_src = rte_crypto_mbuf_to_vec(sop->m_src, 0, max_len,
+ vec_src, RTE_DIM(vec_src));
+ if (n_src < 0 || n_src != sop->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ sgl_src.vec = vec_src;
+ sgl_src.num = n_src;
+ symvec.src_sgl = &sgl_src;
+ if (is_oop) {
+ n_dst = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
+ vec_dst, RTE_DIM(vec_dst));
+ sgl_dst.vec = vec_dst;
+ sgl_dst.num = n_dst;
+ symvec.dest_sgl = &sgl_dst;
+ } else {
+ symvec.dest_sgl = NULL;
+ }
+
+ symvec.iv = &iv_ptr;
+ symvec.digest = &digest_ptr;
+ symvec.status = &st;
+ symvec.num = 1;
+ iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ digest_ptr.va = (void *)sop->auth.digest.data;
+ ofs.ofs.cipher.head = cipher_offset;
+ ofs.ofs.cipher.tail = max_len - cipher_offset - cipher_len;
+ ofs.ofs.auth.head = auth_offset;
+ ofs.ofs.auth.tail = max_len - auth_offset - auth_len;
+
+ n = rte_cryptodev_sym_cpu_crypto_process(dev_id, sop->session, ofs,
+ &symvec);
+ if (st == -ENOTSUP) {
+ status = TEST_SKIPPED;
+ goto error_exit;
+ }
+ if (n != 1) {
+ status = TEST_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ goto error_exit;
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ } else {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for encryption");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
- op = NULL;
+ op = NULL;
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
- rte_pause();
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ rte_pause();
- if (!op) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Failed to process sym crypto op");
- status = TEST_FAILED;
- goto error_exit;
+ if (!op) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process sym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
}
}
@@ -649,7 +719,6 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN) {
uint8_t *auth_res = pktmbuf_mtod_offset(iobuf,
tdata->ciphertext.len);
-
if (memcmp(auth_res, tdata->digest.data, digest_len)) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
"FAILED: %s", __LINE__, "Generated "
--
2.50.1
* [PATCH v2 3/3] test/crypto: fix uninitialised vector fields
From: Radu Nicolau @ 2025-09-11 13:14 UTC
To: dev
Cc: Radu Nicolau, Akhil Goyal, Fan Zhang, Konstantin Ananyev,
Marcin Smoczynski
For the CPU crypto code path, make sure all fields in the
rte_crypto_sym_vec struct are initialised.
Fixes: 2a9f232ce60e ("test/crypto: add CPU crypto mode cases")
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
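In sketch form, the pattern this fix applies (the make_symvec helper
is hypothetical; the parameter names mirror the locals in the fixed
functions): zero the whole descriptor up front so members a given
test leaves unused are NULL instead of stack garbage a PMD may
dereference.

#include <rte_crypto_sym.h>

/* Build a fully initialised descriptor for a single-buffer op. */
static struct rte_crypto_sym_vec
make_symvec(struct rte_crypto_sgl *sgl,
		struct rte_crypto_va_iova_ptr *iv_ptr,
		struct rte_crypto_va_iova_ptr *digest_ptr, int32_t *st)
{
	struct rte_crypto_sym_vec symvec = {0}; /* all pointers NULL, num = 0 */

	symvec.num = 1;
	symvec.src_sgl = sgl;
	symvec.iv = iv_ptr;
	symvec.digest = digest_ptr;
	symvec.status = st;
	/* aad, auth_iv and dest_sgl intentionally remain NULL */
	return symvec;
}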
app/test/test_cryptodev.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 0773d320d4..32e4e60b60 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -289,7 +289,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
struct rte_crypto_vec data_vec[UINT8_MAX], dest_data_vec[UINT8_MAX];
struct rte_crypto_va_iova_ptr cipher_iv, digest, aad_auth_iv;
union rte_crypto_sym_ofs ofs;
- struct rte_crypto_sym_vec vec;
+ struct rte_crypto_sym_vec vec = {0};
struct rte_crypto_sgl sgl, dest_sgl;
uint32_t max_len;
union rte_cryptodev_session_ctx sess;
@@ -526,7 +526,7 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
struct rte_crypto_sym_op *sop;
union rte_crypto_sym_ofs ofs;
struct rte_crypto_sgl sgl;
- struct rte_crypto_sym_vec symvec;
+ struct rte_crypto_sym_vec symvec = {0};
struct rte_crypto_va_iova_ptr iv_ptr, aad_ptr, digest_ptr;
struct rte_crypto_vec vec[UINT8_MAX];
@@ -572,7 +572,7 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
struct rte_crypto_sym_op *sop;
union rte_crypto_sym_ofs ofs;
struct rte_crypto_sgl sgl;
- struct rte_crypto_sym_vec symvec;
+ struct rte_crypto_sym_vec symvec = {0};
struct rte_crypto_va_iova_ptr iv_ptr, digest_ptr;
struct rte_crypto_vec vec[UINT8_MAX];
--
2.50.1