From: pbronowx <piotrx.bronowski@intel.com>
To: dev@dpdk.org
Cc: roy.fan.zhang@intel.com, thomas@monjalon.net, gakhil@marvell.com,
ferruh.yigit@intel.com, declan.doherty@intel.com,
pbronowx <piotrx.bronowski@intel.com>
Subject: [dpdk-dev] [RFC 4/7] crypto/ipsec_mb: move kasumi PMD to ipsec_mb framework
Date: Fri, 18 Jun 2021 12:18:00 +0000
Message-ID: <20210618121803.1189857-5-piotrx.bronowski@intel.com>
In-Reply-To: <20210618121803.1189857-1-piotrx.bronowski@intel.com>

This patch removes the crypto/kasumi folder and gathers all kasumi PMD
implementation-specific details into a single file, pmd_kasumi.c, in the
crypto/ipsec_mb folder.
Signed-off-by: pbronowx <piotrx.bronowski@intel.com>
---
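The vdev name and its parameters are unchanged by this move, so the device is
still created exactly as before, e.g. via the EAL option
--vdev="crypto_kasumi,max_nb_queue_pairs=2,socket_id=0".
Below is a minimal illustrative sketch (not part of the diff), assuming an
application that has already called rte_eal_init(); the helper name and the
parameter values are arbitrary examples, while the vdev name and parameter
names come from RTE_PMD_REGISTER_VDEV()/RTE_PMD_REGISTER_PARAM_STRING()
in pmd_kasumi.c below:

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>

/* Create the kasumi vdev and return its cryptodev id (illustration only). */
static int
create_kasumi_vdev(void)
{
	/* Parameter values are arbitrary example choices. */
	int ret = rte_vdev_init("crypto_kasumi",
			"max_nb_queue_pairs=2,socket_id=0");

	if (ret < 0)
		return ret;

	return rte_cryptodev_get_dev_id("crypto_kasumi");
}
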
drivers/crypto/ipsec_mb/meson.build | 3 +-
drivers/crypto/ipsec_mb/pmd_kasumi.c | 557 +++++++++++++++
.../ipsec_mb/rte_ipsec_mb_pmd_private.h | 7 +
drivers/crypto/kasumi/kasumi_pmd_private.h | 81 ---
drivers/crypto/kasumi/meson.build | 24 -
drivers/crypto/kasumi/rte_kasumi_pmd.c | 642 ------------------
drivers/crypto/kasumi/rte_kasumi_pmd_ops.c | 316 ---------
drivers/crypto/kasumi/version.map | 3 -
drivers/crypto/meson.build | 1 -
9 files changed, 566 insertions(+), 1068 deletions(-)
create mode 100644 drivers/crypto/ipsec_mb/pmd_kasumi.c
delete mode 100644 drivers/crypto/kasumi/kasumi_pmd_private.h
delete mode 100644 drivers/crypto/kasumi/meson.build
delete mode 100644 drivers/crypto/kasumi/rte_kasumi_pmd.c
delete mode 100644 drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
delete mode 100644 drivers/crypto/kasumi/version.map
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index 2f3a170fde..039725ce7d 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -24,6 +24,7 @@ endif
sources = files('rte_ipsec_mb_pmd.c',
'rte_ipsec_mb_pmd_ops.c',
'pmd_aesni_mb.c',
- 'pmd_aesni_gcm.c'
+ 'pmd_aesni_gcm.c',
+ 'pmd_kasumi.c'
)
deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c b/drivers/crypto/ipsec_mb/pmd_kasumi.c
new file mode 100644
index 0000000000..ab0e7f8052
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Intel Corporation
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cpuflags.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_hexdump.h>
+#include <rte_malloc.h>
+
+#include "rte_ipsec_mb_pmd_private.h"
+
+#define KASUMI_KEY_LENGTH 16
+#define KASUMI_IV_LENGTH 8
+#define KASUMI_MAX_BURST 4
+#define BYTE_LEN 8
+#define KASUMI_DIGEST_LENGTH 4
+
+uint8_t pmd_driver_id_kasumi;
+
+static const struct rte_cryptodev_capabilities kasumi_capabilities[] = {
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** KASUMI private session structure */
+struct kasumi_session {
+ /* Keys have to be 16-byte aligned */
+ kasumi_key_sched_t pKeySched_cipher;
+ kasumi_key_sched_t pKeySched_hash;
+ enum ipsec_mb_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+struct kasumi_qp_data {
+ uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
+ /* *< Buffers used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+};
+
+/** Parse crypto xform chain and set private session parameters. */
+static int
+kasumi_session_configure(MB_MGR *mgr, void *priv_sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum ipsec_mb_operation mode;
+ struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
+ &cipher_xform, NULL);
+
+ if (ret)
+ return ret;
+
+ if (cipher_xform) {
+ /* Only KASUMI F8 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
+ IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
+ return -ENOTSUP;
+ }
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+ if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
+ IPSEC_MB_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+
+ /* Initialize key */
+ IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
+ cipher_xform->cipher.key.data,
+ &sess->pKeySched_cipher);
+ }
+
+ if (auth_xform) {
+ /* Only KASUMI F9 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
+ IPSEC_MB_LOG(ERR, "Unsupported authentication");
+ return -ENOTSUP;
+ }
+
+ if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
+ IPSEC_MB_LOG(ERR, "Wrong digest length");
+ return -EINVAL;
+ }
+
+ sess->auth_op = auth_xform->auth.op;
+
+ /* Initialize key */
+ IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
+ &sess->pKeySched_hash);
+ }
+
+ sess->op = mode;
+ return ret;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
+ struct kasumi_session *session, uint8_t num_ops)
+{
+ unsigned int i;
+ uint8_t processed_ops = 0;
+ const void *src[num_ops];
+ void *dst[num_ops];
+ uint8_t *iv_ptr;
+ uint64_t iv[num_ops];
+ uint32_t num_bytes[num_ops];
+
+ for (i = 0; i < num_ops; i++) {
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
+ + (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst
+ ? rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *)
+ + (ops[i]->sym->cipher.data.offset >> 3)
+ : rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
+ + (ops[i]->sym->cipher.data.offset >> 3);
+ iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
+ iv[i] = *((uint64_t *)(iv_ptr));
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ processed_ops++;
+ }
+
+ if (processed_ops != 0)
+ IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
+ iv, src, dst, num_bytes,
+ processed_ops);
+
+ return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
+ struct kasumi_session *session)
+{
+ uint8_t *src, *dst;
+ uint8_t *iv_ptr;
+ uint64_t iv;
+ uint32_t length_in_bits, offset_in_bits;
+
+ offset_in_bits = op->sym->cipher.data.offset;
+ src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ if (op->sym->m_dst == NULL)
+ dst = src;
+ else
+ dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->cipher_iv_offset);
+ iv = *((uint64_t *)(iv_ptr));
+ length_in_bits = op->sym->cipher.data.length;
+
+ IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
+ src, dst, length_in_bits, offset_in_bits);
+
+ return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
+ struct kasumi_session *session, uint8_t num_ops)
+{
+ unsigned int i;
+ uint8_t processed_ops = 0;
+ uint8_t *src, *dst;
+ uint32_t length_in_bits;
+ uint32_t num_bytes;
+ struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+
+ for (i = 0; i < num_ops; i++) {
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ IPSEC_MB_LOG(ERR, "Invalid Offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
+ + (ops[i]->sym->auth.data.offset >> 3);
+ /* Direction from next bit after end of message */
+ num_bytes = length_in_bits >> 3;
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = qp_data->temp_digest;
+ IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
+ &session->pKeySched_hash, src,
+ num_bytes, dst);
+
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ KASUMI_DIGEST_LENGTH)
+ != 0)
+ ops[i]->status
+ = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ dst = ops[i]->sym->auth.digest.data;
+
+ IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
+ &session->pKeySched_hash, src,
+ num_bytes, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which shares the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
+ struct ipsec_mb_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned int i;
+ unsigned int processed_ops;
+
+ switch (session->op) {
+ case IPSEC_MB_OP_ENCRYPT_ONLY:
+ case IPSEC_MB_OP_DECRYPT_ONLY:
+ processed_ops
+ = process_kasumi_cipher_op(qp, ops, session, num_ops);
+ break;
+ case IPSEC_MB_OP_HASH_GEN_ONLY:
+ case IPSEC_MB_OP_HASH_VERIFY_ONLY:
+ processed_ops
+ = process_kasumi_hash_op(qp, ops, session, num_ops);
+ break;
+ case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
+ case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
+ processed_ops
+ = process_kasumi_cipher_op(qp, ops, session, num_ops);
+ process_kasumi_hash_op(qp, ops, session, processed_ops);
+ break;
+ case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
+ case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
+ processed_ops
+ = process_kasumi_hash_op(qp, ops, session, num_ops);
+ process_kasumi_cipher_op(qp, ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct kasumi_session));
+ memset(
+ ops[i]->sym->session, 0,
+ rte_cryptodev_sym_get_existing_header_session_size(
+ ops[i]->sym->session));
+ rte_mempool_put(qp->sess_mp_priv, session);
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ *accumulated_enqueued_ops += i;
+ return processed_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
+ struct ipsec_mb_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+ unsigned int processed_op;
+
+ switch (session->op) {
+ /* case KASUMI_OP_ONLY_CIPHER: */
+ case IPSEC_MB_OP_ENCRYPT_ONLY:
+ case IPSEC_MB_OP_DECRYPT_ONLY:
+ processed_op = process_kasumi_cipher_op_bit(qp, op, session);
+ break;
+ /* case KASUMI_OP_ONLY_AUTH: */
+ case IPSEC_MB_OP_HASH_GEN_ONLY:
+ case IPSEC_MB_OP_HASH_VERIFY_ONLY:
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
+ break;
+ /* case KASUMI_OP_CIPHER_AUTH: */
+ case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
+ processed_op = process_kasumi_cipher_op_bit(qp, op, session);
+ if (processed_op == 1)
+ process_kasumi_hash_op(qp, &op, session, 1);
+ break;
+ /* case KASUMI_OP_AUTH_CIPHER: */
+ case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
+ if (processed_op == 1)
+ process_kasumi_cipher_op_bit(qp, op, session);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_op = 0;
+ }
+
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0, sizeof(struct kasumi_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ *accumulated_enqueued_ops += processed_op;
+ return processed_op;
+}
+
+static uint16_t
+kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[nb_ops];
+ struct rte_crypto_op *curr_c_op = NULL;
+
+ struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
+ struct ipsec_mb_qp *qp = queue_pair;
+ unsigned int i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+ unsigned int nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
+ (void **)ops, nb_ops, NULL);
+ for (i = 0; i < nb_dequeued; i++) {
+ curr_c_op = ops[i];
+
+#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
+ if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
+ || (curr_c_op->sym->m_dst != NULL
+ && !rte_pktmbuf_is_contiguous(
+ curr_c_op->sym->m_dst))) {
+ IPSEC_MB_LOG(ERR,
+ "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
+ curr_c_op);
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+#endif
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = (struct kasumi_session *)
+ ipsec_mb_get_session_private(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL
+ || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
+ curr_c_op->status
+ = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* If length/offset is at bit-level, process this buffer alone.
+ */
+ if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
+ /* Process the ops of the previous session. */
+ if (prev_sess != NULL) {
+ processed_ops
+ = process_ops(c_ops, prev_sess, qp,
+ burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+
+ processed_ops = process_op_bit(curr_c_op, curr_sess,
+ qp, &enqueued_ops);
+ if (processed_ops != 1)
+ break;
+
+ continue;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == KASUMI_MAX_BURST) {
+ processed_ops
+ = process_ops(c_ops, prev_sess, qp,
+ burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(
+ c_ops, prev_sess, qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess, qp, burst_size,
+ &enqueued_ops);
+ }
+ qp->stats.dequeued_count += i;
+
+ return i;
+}
+
+struct rte_cryptodev_ops kasumi_pmd_ops = {
+ .dev_configure = ipsec_mb_pmd_config,
+ .dev_start = ipsec_mb_pmd_start,
+ .dev_stop = ipsec_mb_pmd_stop,
+ .dev_close = ipsec_mb_pmd_close,
+
+ .stats_get = ipsec_mb_pmd_stats_get,
+ .stats_reset = ipsec_mb_pmd_stats_reset,
+
+ .dev_infos_get = ipsec_mb_pmd_info_get,
+
+ .queue_pair_setup = ipsec_mb_pmd_qp_setup,
+ .queue_pair_release = ipsec_mb_pmd_qp_release,
+
+ .sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
+ .sym_session_configure = ipsec_mb_pmd_sym_session_configure,
+ .sym_session_clear = ipsec_mb_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
+
+static int
+cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
+{
+ return cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_KASUMI);
+}
+
+static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
+ .probe = cryptodev_kasumi_probe,
+ .remove = cryptodev_ipsec_mb_remove
+};
+
+static struct cryptodev_driver kasumi_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
+ "max_nb_queue_pairs=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
+ cryptodev_kasumi_pmd_drv.driver,
+ pmd_driver_id_kasumi);
+
+/* Constructor function to register kasumi PMD */
+RTE_INIT(ipsec_mb_register_kasumi)
+{
+ struct ipsec_mb_pmd_data *kasumi_data
+ = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
+
+ kasumi_data->caps = kasumi_capabilities;
+ kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+ kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
+ | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
+ | RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
+ | RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT
+ | RTE_CRYPTODEV_FF_SYM_SESSIONLESS
+ | RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+ kasumi_data->internals_priv_size = 0;
+ kasumi_data->ops = &kasumi_pmd_ops;
+ kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
+ kasumi_data->session_configure = kasumi_session_configure;
+ kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
index a9de241714..20dcff40ee 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -37,6 +37,9 @@ extern RTE_DEFINE_PER_LCORE(MB_MGR *, mb_mgr);
#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< IPSEC Multi buffer PMD aesni_gcm device name */
+#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
+/**< IPSEC Multi buffer PMD kasumi device name */
+
/** PMD LOGTYPE DRIVER, common to all PMDs */
extern int ipsec_mb_logtype_driver;
#define IPSEC_MB_LOG(level, fmt, ...) \
@@ -47,6 +50,7 @@ extern int ipsec_mb_logtype_driver;
enum ipsec_mb_pmd_types {
IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
IPSEC_MB_PMD_TYPE_AESNI_GCM,
+ IPSEC_MB_PMD_TYPE_KASUMI,
IPSEC_MB_N_PMD_TYPES
};
@@ -67,6 +71,7 @@ enum ipsec_mb_operation {
extern uint8_t pmd_driver_id_aesni_mb;
extern uint8_t pmd_driver_id_aesni_gcm;
+extern uint8_t pmd_driver_id_kasumi;
/** Helper function. Gets driver ID based on PMD type */
static __rte_always_inline uint8_t
@@ -77,6 +82,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
return pmd_driver_id_aesni_mb;
case IPSEC_MB_PMD_TYPE_AESNI_GCM:
return pmd_driver_id_aesni_gcm;
+ case IPSEC_MB_PMD_TYPE_KASUMI:
+ return pmd_driver_id_kasumi;
default:
break;
}
diff --git a/drivers/crypto/kasumi/kasumi_pmd_private.h b/drivers/crypto/kasumi/kasumi_pmd_private.h
deleted file mode 100644
index abedcd616d..0000000000
--- a/drivers/crypto/kasumi/kasumi_pmd_private.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#ifndef _KASUMI_PMD_PRIVATE_H_
-#define _KASUMI_PMD_PRIVATE_H_
-
-#include <intel-ipsec-mb.h>
-
-#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
-/**< KASUMI PMD device name */
-
-/** KASUMI PMD LOGTYPE DRIVER */
-extern int kasumi_logtype_driver;
-
-#define KASUMI_LOG(level, fmt, ...) \
- rte_log(RTE_LOG_ ## level, kasumi_logtype_driver, \
- "%s() line %u: " fmt "\n", __func__, __LINE__, \
- ## __VA_ARGS__)
-
-#define KASUMI_DIGEST_LENGTH 4
-
-/** private data structure for each virtual KASUMI device */
-struct kasumi_private {
- unsigned max_nb_queue_pairs;
- /**< Max number of queue pairs supported by device */
- MB_MGR *mgr;
- /**< Multi-buffer instance */
-};
-
-/** KASUMI buffer queue pair */
-struct kasumi_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- /**< Unique Queue Pair Name */
- struct rte_ring *processed_ops;
- /**< Ring for placing processed ops */
- struct rte_mempool *sess_mp;
- /**< Session Mempool */
- struct rte_mempool *sess_mp_priv;
- /**< Session Private Data Mempool */
- struct rte_cryptodev_stats qp_stats;
- /**< Queue pair statistics */
- uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
- MB_MGR *mgr;
- /**< Multi-buffer instance */
-} __rte_cache_aligned;
-
-enum kasumi_operation {
- KASUMI_OP_ONLY_CIPHER,
- KASUMI_OP_ONLY_AUTH,
- KASUMI_OP_CIPHER_AUTH,
- KASUMI_OP_AUTH_CIPHER,
- KASUMI_OP_NOT_SUPPORTED
-};
-
-/** KASUMI private session structure */
-struct kasumi_session {
- /* Keys have to be 16-byte aligned */
- kasumi_key_sched_t pKeySched_cipher;
- kasumi_key_sched_t pKeySched_hash;
- enum kasumi_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
-
-int
-kasumi_set_session_parameters(MB_MGR *mgr, struct kasumi_session *sess,
- const struct rte_crypto_sym_xform *xform);
-
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
-
-#endif /* _KASUMI_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/kasumi/meson.build b/drivers/crypto/kasumi/meson.build
deleted file mode 100644
index e6e0f08c3d..0000000000
--- a/drivers/crypto/kasumi/meson.build
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018-2020 Intel Corporation
-
-IMB_required_ver = '0.53.0'
-lib = cc.find_library('IPSec_MB', required: false)
-if not lib.found()
- build = false
- reason = 'missing dependency, "libIPSec_MB"'
-else
- # version comes with quotes, so we split based on " and take the middle
- imb_ver = cc.get_define('IMB_VERSION_STR',
- prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
-
- if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
- reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
- IMB_required_ver, imb_ver)
- build = false
- endif
-
-endif
-
-ext_deps += lib
-sources = files('rte_kasumi_pmd.c', 'rte_kasumi_pmd_ops.c')
-deps += ['bus_vdev']
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
deleted file mode 100644
index 48b7db9e9b..0000000000
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ /dev/null
@@ -1,642 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
-#include <rte_bus_vdev.h>
-#include <rte_malloc.h>
-#include <rte_cpuflags.h>
-
-#include "kasumi_pmd_private.h"
-
-#define KASUMI_KEY_LENGTH 16
-#define KASUMI_IV_LENGTH 8
-#define KASUMI_MAX_BURST 4
-#define BYTE_LEN 8
-
-static uint8_t cryptodev_driver_id;
-
-/** Get xform chain order. */
-static enum kasumi_operation
-kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
-{
- if (xform == NULL)
- return KASUMI_OP_NOT_SUPPORTED;
-
- if (xform->next)
- if (xform->next->next != NULL)
- return KASUMI_OP_NOT_SUPPORTED;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
- if (xform->next == NULL)
- return KASUMI_OP_ONLY_AUTH;
- else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return KASUMI_OP_AUTH_CIPHER;
- else
- return KASUMI_OP_NOT_SUPPORTED;
- }
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
- if (xform->next == NULL)
- return KASUMI_OP_ONLY_CIPHER;
- else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return KASUMI_OP_CIPHER_AUTH;
- else
- return KASUMI_OP_NOT_SUPPORTED;
- }
-
- return KASUMI_OP_NOT_SUPPORTED;
-}
-
-
-/** Parse crypto xform chain and set private session parameters. */
-int
-kasumi_set_session_parameters(MB_MGR *mgr, struct kasumi_session *sess,
- const struct rte_crypto_sym_xform *xform)
-{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum kasumi_operation mode;
-
- /* Select Crypto operation - hash then cipher / cipher then hash */
- mode = kasumi_get_mode(xform);
-
- switch (mode) {
- case KASUMI_OP_CIPHER_AUTH:
- auth_xform = xform->next;
- /* Fall-through */
- case KASUMI_OP_ONLY_CIPHER:
- cipher_xform = xform;
- break;
- case KASUMI_OP_AUTH_CIPHER:
- cipher_xform = xform->next;
- /* Fall-through */
- case KASUMI_OP_ONLY_AUTH:
- auth_xform = xform;
- break;
- case KASUMI_OP_NOT_SUPPORTED:
- default:
- KASUMI_LOG(ERR, "Unsupported operation chain order parameter");
- return -ENOTSUP;
- }
-
- if (cipher_xform) {
- /* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
- KASUMI_LOG(ERR, "Unsupported cipher algorithm ");
- return -ENOTSUP;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
- if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- KASUMI_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
-
- /* Initialize key */
- IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
- KASUMI_LOG(ERR, "Unsupported authentication");
- return -ENOTSUP;
- }
-
- if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- KASUMI_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- /* Initialize key */
- IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
-
- sess->op = mode;
-
- return 0;
-}
-
-/** Get KASUMI session. */
-static struct kasumi_session *
-kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
-{
- struct kasumi_session *sess = NULL;
-
- if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- if (likely(op->sym->session != NULL))
- sess = (struct kasumi_session *)
- get_sym_session_private_data(
- op->sym->session,
- cryptodev_driver_id);
- } else {
- void *_sess = NULL;
- void *_sess_private_data = NULL;
-
- if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
- return NULL;
-
- if (rte_mempool_get(qp->sess_mp_priv,
- (void **)&_sess_private_data))
- return NULL;
-
- sess = (struct kasumi_session *)_sess_private_data;
-
- if (unlikely(kasumi_set_session_parameters(qp->mgr, sess,
- op->sym->xform) != 0)) {
- rte_mempool_put(qp->sess_mp, _sess);
- rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
- sess = NULL;
- }
- op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_sym_session_private_data(op->sym->session,
- cryptodev_driver_id, _sess_private_data);
- }
-
- if (unlikely(sess == NULL))
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
- return sess;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned i;
- uint8_t processed_ops = 0;
- const void *src[num_ops];
- void *dst[num_ops];
- uint8_t *iv_ptr;
- uint64_t iv[num_ops];
- uint32_t num_bytes[num_ops];
-
- for (i = 0; i < num_ops; i++) {
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3);
- dst[i] = ops[i]->sym->m_dst ?
- rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3) :
- rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3);
- iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- iv[i] = *((uint64_t *)(iv_ptr));
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- processed_ops++;
- }
-
- if (processed_ops != 0)
- IMB_KASUMI_F8_N_BUFFER(qp->mgr, &session->pKeySched_cipher, iv,
- src, dst, num_bytes, processed_ops);
-
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct kasumi_qp *qp, struct rte_crypto_op *op,
- struct kasumi_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- uint64_t iv;
- uint32_t length_in_bits, offset_in_bits;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL)
- dst = src;
- else
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
- iv = *((uint64_t *)(iv_ptr));
- length_in_bits = op->sym->cipher.data.length;
-
- IMB_KASUMI_F8_1_BUFFER_BIT(qp->mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session,
- uint8_t num_ops)
-{
- unsigned i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint32_t num_bytes;
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG(ERR, "Invalid Offset");
- break;
- }
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->auth.data.offset >> 3);
- /* Direction from next bit after end of message */
- num_bytes = length_in_bits >> 3;
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp->temp_digest;
- IMB_KASUMI_F9_1_BUFFER(qp->mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
-
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- KASUMI_DIGEST_LENGTH) != 0)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_KASUMI_F9_1_BUFFER(qp->mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
- struct kasumi_qp *qp, uint8_t num_ops,
- uint16_t *accumulated_enqueued_ops)
-{
- unsigned i;
- unsigned enqueued_ops, processed_ops;
-
- switch (session->op) {
- case KASUMI_OP_ONLY_CIPHER:
- processed_ops = process_kasumi_cipher_op(qp, ops,
- session, num_ops);
- break;
- case KASUMI_OP_ONLY_AUTH:
- processed_ops = process_kasumi_hash_op(qp, ops, session,
- num_ops);
- break;
- case KASUMI_OP_CIPHER_AUTH:
- processed_ops = process_kasumi_cipher_op(qp, ops, session,
- num_ops);
- process_kasumi_hash_op(qp, ops, session, processed_ops);
- break;
- case KASUMI_OP_AUTH_CIPHER:
- processed_ops = process_kasumi_hash_op(qp, ops, session,
- num_ops);
- process_kasumi_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct kasumi_session));
- memset(ops[i]->sym->session, 0,
- rte_cryptodev_sym_get_existing_header_session_size(
- ops[i]->sym->session));
- rte_mempool_put(qp->sess_mp_priv, session);
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
-
- enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops, NULL);
- qp->qp_stats.enqueued_count += enqueued_ops;
- *accumulated_enqueued_ops += enqueued_ops;
-
- return enqueued_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
- struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
-{
- unsigned enqueued_op, processed_op;
-
- switch (session->op) {
- case KASUMI_OP_ONLY_CIPHER:
- processed_op = process_kasumi_cipher_op_bit(qp, op,
- session);
- break;
- case KASUMI_OP_ONLY_AUTH:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- break;
- case KASUMI_OP_CIPHER_AUTH:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_kasumi_hash_op(qp, &op, session, 1);
- break;
- case KASUMI_OP_AUTH_CIPHER:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_kasumi_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(op->sym->session, 0, sizeof(struct kasumi_session));
- rte_cryptodev_sym_session_free(op->sym->session);
- op->sym->session = NULL;
- }
-
- enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
- processed_op, NULL);
- qp->qp_stats.enqueued_count += enqueued_op;
- *accumulated_enqueued_ops += enqueued_op;
-
- return enqueued_op;
-}
-
-static uint16_t
-kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *c_ops[nb_ops];
- struct rte_crypto_op *curr_c_op;
-
- struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
- struct kasumi_qp *qp = queue_pair;
- unsigned i;
- uint8_t burst_size = 0;
- uint16_t enqueued_ops = 0;
- uint8_t processed_ops;
-
- for (i = 0; i < nb_ops; i++) {
- curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
- if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
- (curr_c_op->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- curr_c_op->sym->m_dst))) {
- KASUMI_LOG(ERR, "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf as "
- "source/destination buffer.", curr_c_op);
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = kasumi_get_session(qp, curr_c_op);
- if (unlikely(curr_sess == NULL ||
- curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level, process this buffer alone. */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op, curr_sess,
- qp, &enqueued_ops);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == KASUMI_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
- }
-
- qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
- return enqueued_ops;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
- struct kasumi_qp *qp = queue_pair;
-
- unsigned nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops, NULL);
- qp->qp_stats.dequeued_count += nb_dequeued;
-
- return nb_dequeued;
-}
-
-static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);
-
-static int
-cryptodev_kasumi_create(const char *name,
- struct rte_vdev_device *vdev,
- struct rte_cryptodev_pmd_init_params *init_params)
-{
- struct rte_cryptodev *dev;
- struct kasumi_private *internals;
- MB_MGR *mgr;
-
- dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
- if (dev == NULL) {
- KASUMI_LOG(ERR, "failed to create cryptodev vdev");
- goto init_error;
- }
-
- dev->driver_id = cryptodev_driver_id;
- dev->dev_ops = rte_kasumi_pmd_ops;
-
- /* Register RX/TX burst functions for data path. */
- dev->dequeue_burst = kasumi_pmd_dequeue_burst;
- dev->enqueue_burst = kasumi_pmd_enqueue_burst;
-
- dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
- RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
- RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
-
- mgr = alloc_mb_mgr(0);
- if (mgr == NULL)
- return -ENOMEM;
-
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX)) {
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
- init_mb_mgr_avx(mgr);
- } else {
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- init_mb_mgr_sse(mgr);
- }
-
- internals = dev->data->dev_private;
-
- internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->mgr = mgr;
-
- return 0;
-init_error:
- KASUMI_LOG(ERR, "driver %s: failed",
- init_params->name);
-
- cryptodev_kasumi_remove(vdev);
- return -EFAULT;
-}
-
-static int
-cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
-{
- struct rte_cryptodev_pmd_init_params init_params = {
- "",
- sizeof(struct kasumi_private),
- rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
- };
- const char *name;
- const char *input_args;
-
- name = rte_vdev_device_name(vdev);
- if (name == NULL)
- return -EINVAL;
- input_args = rte_vdev_device_args(vdev);
-
- rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
-
- return cryptodev_kasumi_create(name, vdev, &init_params);
-}
-
-static int
-cryptodev_kasumi_remove(struct rte_vdev_device *vdev)
-{
- struct rte_cryptodev *cryptodev;
- const char *name;
- struct kasumi_private *internals;
-
- name = rte_vdev_device_name(vdev);
- if (name == NULL)
- return -EINVAL;
-
- cryptodev = rte_cryptodev_pmd_get_named_dev(name);
- if (cryptodev == NULL)
- return -ENODEV;
-
- internals = cryptodev->data->dev_private;
-
- free_mb_mgr(internals->mgr);
-
- return rte_cryptodev_pmd_destroy(cryptodev);
-}
-
-static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
- .probe = cryptodev_kasumi_probe,
- .remove = cryptodev_kasumi_remove
-};
-
-static struct cryptodev_driver kasumi_crypto_drv;
-
-RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
-RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
-RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
- "max_nb_queue_pairs=<int> "
- "socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
- cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id);
-
-RTE_LOG_REGISTER_DEFAULT(kasumi_logtype_driver, NOTICE);
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
deleted file mode 100644
index 43376c1aa0..0000000000
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ /dev/null
@@ -1,316 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <rte_cryptodev_pmd.h>
-
-#include "kasumi_pmd_private.h"
-
-static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
- { /* KASUMI (F9) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
- .block_size = 8,
- .key_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- },
- .digest_size = {
- .min = 4,
- .max = 4,
- .increment = 0
- },
- .iv_size = { 0 }
- }, }
- }, }
- },
- { /* KASUMI (F8) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
- .block_size = 8,
- .key_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- },
- .iv_size = {
- .min = 8,
- .max = 8,
- .increment = 0
- }
- }, }
- }, }
- },
- RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-/** Configure device */
-static int
-kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev,
- __rte_unused struct rte_cryptodev_config *config)
-{
- return 0;
-}
-
-/** Start device */
-static int
-kasumi_pmd_start(__rte_unused struct rte_cryptodev *dev)
-{
- return 0;
-}
-
-/** Stop device */
-static void
-kasumi_pmd_stop(__rte_unused struct rte_cryptodev *dev)
-{
-}
-
-/** Close device */
-static int
-kasumi_pmd_close(__rte_unused struct rte_cryptodev *dev)
-{
- return 0;
-}
-
-
-/** Get device statistics */
-static void
-kasumi_pmd_stats_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_stats *stats)
-{
- int qp_id;
-
- for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
- struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
-
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
-
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
- }
-}
-
-/** Reset device statistics */
-static void
-kasumi_pmd_stats_reset(struct rte_cryptodev *dev)
-{
- int qp_id;
-
- for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
- struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
-
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
- }
-}
-
-
-/** Get device info */
-static void
-kasumi_pmd_info_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_info *dev_info)
-{
- struct kasumi_private *internals = dev->data->dev_private;
-
- if (dev_info != NULL) {
- dev_info->driver_id = dev->driver_id;
- dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- /* No limit of number of sessions */
- dev_info->sym.max_nb_sessions = 0;
- dev_info->feature_flags = dev->feature_flags;
- dev_info->capabilities = kasumi_pmd_capabilities;
- }
-}
-
-/** Release queue pair */
-static int
-kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
-{
- struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
-
- if (qp != NULL) {
- rte_ring_free(qp->processed_ops);
- rte_free(qp);
- dev->data->queue_pairs[qp_id] = NULL;
- }
- return 0;
-}
-
-/** set a unique name for the queue pair based on its name, dev_id and qp_id */
-static int
-kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
- struct kasumi_qp *qp)
-{
- unsigned n = snprintf(qp->name, sizeof(qp->name),
- "kasumi_pmd_%u_qp_%u",
- dev->data->dev_id, qp->id);
-
- if (n >= sizeof(qp->name))
- return -1;
-
- return 0;
-}
-
-/** Create a ring to place processed ops on */
-static struct rte_ring *
-kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
- unsigned ring_size, int socket_id)
-{
- struct rte_ring *r;
-
- r = rte_ring_lookup(qp->name);
- if (r) {
- if (rte_ring_get_size(r) == ring_size) {
- KASUMI_LOG(INFO, "Reusing existing ring %s"
- " for processed packets",
- qp->name);
- return r;
- }
-
- KASUMI_LOG(ERR, "Unable to reuse existing ring %s"
- " for processed packets",
- qp->name);
- return NULL;
- }
-
- return rte_ring_create(qp->name, ring_size, socket_id,
- RING_F_SP_ENQ | RING_F_SC_DEQ);
-}
-
-/** Setup a queue pair */
-static int
-kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
- const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
-{
- struct kasumi_qp *qp = NULL;
- struct kasumi_private *internals = dev->data->dev_private;
-
- /* Free memory prior to re-allocation if needed. */
- if (dev->data->queue_pairs[qp_id] != NULL)
- kasumi_pmd_qp_release(dev, qp_id);
-
- /* Allocate the queue pair data structure. */
- qp = rte_zmalloc_socket("KASUMI PMD Queue Pair", sizeof(*qp),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (qp == NULL)
- return (-ENOMEM);
-
- qp->id = qp_id;
- dev->data->queue_pairs[qp_id] = qp;
-
- if (kasumi_pmd_qp_set_unique_name(dev, qp))
- goto qp_setup_cleanup;
-
- qp->processed_ops = kasumi_pmd_qp_create_processed_ops_ring(qp,
- qp_conf->nb_descriptors, socket_id);
- if (qp->processed_ops == NULL)
- goto qp_setup_cleanup;
-
- qp->mgr = internals->mgr;
- qp->sess_mp = qp_conf->mp_session;
- qp->sess_mp_priv = qp_conf->mp_session_private;
-
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-
- return 0;
-
-qp_setup_cleanup:
- rte_free(qp);
-
- return -1;
-}
-
-/** Returns the size of the KASUMI session structure */
-static unsigned
-kasumi_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
-{
- return sizeof(struct kasumi_session);
-}
-
-/** Configure a KASUMI session from a crypto xform chain */
-static int
-kasumi_pmd_sym_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct rte_cryptodev_sym_session *sess,
- struct rte_mempool *mempool)
-{
- void *sess_private_data;
- int ret;
- struct kasumi_private *internals = dev->data->dev_private;
-
- if (unlikely(sess == NULL)) {
- KASUMI_LOG(ERR, "invalid session struct");
- return -EINVAL;
- }
-
- if (rte_mempool_get(mempool, &sess_private_data)) {
- KASUMI_LOG(ERR,
- "Couldn't get object from session mempool");
- return -ENOMEM;
- }
-
- ret = kasumi_set_session_parameters(internals->mgr,
- sess_private_data, xform);
- if (ret != 0) {
- KASUMI_LOG(ERR, "failed configure session parameters");
-
- /* Return session to mempool */
- rte_mempool_put(mempool, sess_private_data);
- return ret;
- }
-
- set_sym_session_private_data(sess, dev->driver_id,
- sess_private_data);
-
- return 0;
-}
-
-/** Clear the memory of session so it doesn't leave key material behind */
-static void
-kasumi_pmd_sym_session_clear(struct rte_cryptodev *dev,
- struct rte_cryptodev_sym_session *sess)
-{
- uint8_t index = dev->driver_id;
- void *sess_priv = get_sym_session_private_data(sess, index);
-
- /* Zero out the whole structure */
- if (sess_priv) {
- memset(sess_priv, 0, sizeof(struct kasumi_session));
- struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_sym_session_private_data(sess, index, NULL);
- rte_mempool_put(sess_mp, sess_priv);
- }
-}
-
-struct rte_cryptodev_ops kasumi_pmd_ops = {
- .dev_configure = kasumi_pmd_config,
- .dev_start = kasumi_pmd_start,
- .dev_stop = kasumi_pmd_stop,
- .dev_close = kasumi_pmd_close,
-
- .stats_get = kasumi_pmd_stats_get,
- .stats_reset = kasumi_pmd_stats_reset,
-
- .dev_infos_get = kasumi_pmd_info_get,
-
- .queue_pair_setup = kasumi_pmd_qp_setup,
- .queue_pair_release = kasumi_pmd_qp_release,
-
- .sym_session_get_size = kasumi_pmd_sym_session_get_size,
- .sym_session_configure = kasumi_pmd_sym_session_configure,
- .sym_session_clear = kasumi_pmd_sym_session_clear
-};
-
-struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
diff --git a/drivers/crypto/kasumi/version.map b/drivers/crypto/kasumi/version.map
deleted file mode 100644
index 4a76d1d52d..0000000000
--- a/drivers/crypto/kasumi/version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_21 {
- local: *;
-};
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index 0ba62e94ca..c1f854977c 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -13,7 +13,6 @@ drivers = [
'ccp',
'dpaa_sec',
'dpaa2_sec',
- 'kasumi',
'mvsam',
'nitrox',
'null',
--
2.25.1