* [dpdk-dev] [PATCH] cryptodev: API change to rte_crypto_op bursts
@ 2016-01-30 13:07 Declan Doherty
2016-02-08 17:50 ` Trahe, Fiona
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 0/2] cryptodev API changes Declan Doherty
0 siblings, 2 replies; 62+ messages in thread
From: Declan Doherty @ 2016-01-30 13:07 UTC (permalink / raw)
To: dev
This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
of rte_crypto_op's rather than the current implementation, which operates on
rte_mbuf bursts. This simplifies the burst processing in the crypto PMDs and the
use of crypto operations in general.
The changes also separate the symmetric operation parameters from the more
general operation parameters; this will simplify the integration of
asymmetric crypto operations in the future.
As well as the changes to the crypto APIs, this patch adds functions for managing
rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
tests and sample application to work with the modified APIs, and finally
removes the now unused rte_mbuf_offload library.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 713 +++++++++++----------
app/test/test_cryptodev.h | 8 +-
app/test/test_cryptodev_perf.c | 242 +++----
config/common_bsdapp | 7 -
config/common_linuxapp | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 182 +++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 2 +-
drivers/crypto/qat/qat_crypto.c | 117 ++--
drivers/crypto/qat/qat_crypto.h | 10 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 4 +-
examples/l2fwd-crypto/main.c | 229 +++----
lib/Makefile | 1 -
lib/librte_cryptodev/rte_crypto.h | 578 ++++++++++++-----
lib/librte_cryptodev/rte_cryptodev.c | 77 +++
lib/librte_cryptodev/rte_cryptodev.h | 105 +--
lib/librte_cryptodev/rte_cryptodev_version.map | 1 +
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 307 ---------
.../rte_mbuf_offload_version.map | 7 -
24 files changed, 1342 insertions(+), 1430 deletions(-)
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index b90aeea..7082e20 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -218,10 +218,6 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index fd5b7ec..283395e 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -48,7 +47,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,7 +61,6 @@ struct crypto_unittest_params {
struct rte_cryptodev_session *sess;
- struct rte_mbuf_offload *ol;
struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -104,7 +102,7 @@ setup_test_string(struct rte_mempool *mpool,
return m;
}
-#if HEX_DUMP
+#ifdef HEX_DUMP
static void
hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
{
@@ -112,27 +110,29 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
-#if HEX_DUMP
+#ifdef HEX_DUMP
hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
-#if HEX_DUMP
+#ifdef HEX_DUMP
if (obuf)
hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -161,12 +161,13 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -251,10 +252,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -324,8 +324,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -678,76 +678,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -774,7 +775,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -784,58 +784,63 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
+ /* Create crypto session*/
ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* Set crypto operation data parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym.m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -860,60 +865,65 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_sym_op_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
+
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -981,42 +991,46 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
-
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1089,41 +1103,46 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1191,42 +1210,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1307,41 +1331,47 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1440,43 +1470,47 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1545,37 +1579,41 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1584,6 +1622,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
@@ -1638,35 +1677,41 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1826,50 +1871,48 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_dst = dst_m;
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process op obuf");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym.m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 034393e..2491ca5 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -32,8 +32,6 @@
#ifndef TEST_CRYPTODEV_H_
#define TEST_CRYPTODEV_H_
-#define HEX_DUMP 0
-
#define FALSE 0
#define TRUE 1
@@ -47,8 +45,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
@@ -62,6 +61,7 @@
#define AES_XCBC_MAC_KEY_SZ (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 (12)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224 (14)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 1744e13..5058a84 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -50,7 +49,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -69,7 +68,6 @@ struct crypto_unittest_params {
struct rte_cryptodev_session *sess;
struct rte_crypto_op *op;
- struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +77,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +90,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
@@ -112,23 +110,23 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -255,8 +253,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1697,11 +1695,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1738,46 +1737,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&op->sym, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym.auth.digest.data = ut_params->digest;
+ op->sym.auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym.auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym.auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym.auth.data.length = data_params[0].length;
+
+
+ op->sym.cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym.cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ rte_memcpy(op->sym.cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym.cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym.cipher.data.length = data_params[0].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym.m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1788,17 +1791,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1813,8 +1816,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1822,12 +1825,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1841,16 +1847,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym.m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1860,11 +1859,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+ double throughput, mmps;
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1903,7 +1905,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+ "Mrps\tThroughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1913,94 +1915,106 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+ TEST_ASSERT_NOT_NULL(ut_params->digest
+ , "no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+ struct rte_crypto_op *op = rte_crypto_op_alloc(
+ ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- struct rte_crypto_op *cop = &ol->op.crypto;
+ rte_crypto_sym_op_attach_session(&op->sym,
+ ut_params->sess);
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ op->sym.auth.digest.data = ut_params->digest;
+ op->sym.auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym.auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym.auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym.auth.data.length = data_params[index].length;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym.cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ m, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym.cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(op->sym.cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym.cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym.cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym.m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
+
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
burst_received = rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
-
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
+
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym.m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
diff --git a/config/common_bsdapp b/config/common_bsdapp
index ed7c31c..2868208 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -362,13 +362,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 74bc515..65720a2 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -379,13 +379,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 7a91001..f626386 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index d8ccf05..bfe4a98 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -38,7 +38,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
@@ -297,27 +296,30 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
- struct aesni_mb_session *sess;
+ struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym.type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(op->sym.session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym.session->_private;
} else {
- struct rte_cryptodev_session *c_sess = NULL;
+ void *_sess = NULL;
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct aesni_mb_session *)c_sess->_private;
+ sess = (struct aesni_mb_session *)
+ ((struct rte_cryptodev_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0))
- return NULL;
+ sess, op->sym.xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ }
}
return sess;
@@ -336,11 +338,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym.m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -369,49 +374,65 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym.m_dst) {
+ m_src = m_dst = op->sym.m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym.m_src));
+ if (odata == NULL)
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym.m_src, void*),
+ rte_pktmbuf_data_len(op->sym.m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym.cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ sizeof(get_digest_byte_length(job->hash_alg)));
+
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym.auth.digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library current only support returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym.cipher.iv.data;
+ job->iv_len_in_bytes = op->sym.cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym.cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym.cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym.auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym.auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
@@ -426,43 +447,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym.auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym.type == RTE_CRYPTO_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym.session);
+ op->sym.session = NULL;
}
- return m;
+ return op;
}
/**
@@ -478,16 +497,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_pkts, (void *)op);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -496,52 +515,49 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
JOB_AES_HMAC *job = NULL;
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
- if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ for (i = 0; i < nb_ops; i++) {
+ if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- sess = get_session(qp, &ol->op.crypto);
+ sess = get_session(_qp, ops[i]);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(_qp, ops[i], sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
/* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ job = (*_qp->ops->job.submit)(&_qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(_qp, job);
}
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ _qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -549,26 +565,22 @@ flush_jobs:
* If we haven't processed any jobs in submit loop, then flush jobs
* queue to stop the output stalling
*/
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
+ job = (*_qp->ops->job.flush_job)(&_qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ _qp->stats.enqueued_count += handle_completed_jobs(_qp, job);
return i;
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct aesni_mb_qp *qp = queue_pair;
-
- unsigned nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+	struct aesni_mb_qp *_qp = qp;
-	return nb_dequeued;
+	unsigned nb_dequeued = rte_ring_dequeue_burst(_qp->processed_pkts,
+			(void **)ops, nb_ops);
+	_qp->stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 96d22f6..612877b 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..304c85c 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 47b257f..6c7b450 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
@@ -72,7 +71,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +274,15 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t qat_pmd_enqueue_sym_op_burst(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,19 +293,18 @@ uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = (rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = (rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights);
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
+ while (nb_pkts_sent != nb_ops_possible) {
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
if (nb_pkts_sent == 0)
@@ -316,7 +314,7 @@ uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
nb_pkts_sent++;
- cur_tx_pkt++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -327,13 +325,13 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_pmd_dequeue_sym_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -341,16 +339,20 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -359,9 +361,8 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -375,74 +376,64 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
- struct rte_mbuf_offload *ol;
-
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
- return -EINVAL;
- }
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(op->sym.type == RTE_CRYPTO_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(op->sym.session->type != RTE_CRYPTODEV_QAT_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym.session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
* - single entry buffer.
- * - always in place.
*/
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
+ qat_req->comn_mid.dst_length = qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym.m_src);
+ qat_req->comn_mid.dest_data_addr = qat_req->comn_mid.src_data_addr =
+ rte_pktmbuf_mtophys(op->sym.m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym.cipher.data.length;
+ cipher_param->cipher_offset = op->sym.cipher.data.offset;
+ if (op->sym.cipher.iv.length && (op->sym.cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym.cipher.iv.data,
+ op->sym.cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym.cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym.auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym.auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym.auth.data.offset;
+ auth_param->auth_len = op->sym.auth.data.length;
+
+ auth_param->u1.aad_adr = op->sym.auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
@@ -454,9 +445,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym.m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym.m_src));
+ rte_hexdump(stdout, "iv:", op->sym.cipher.iv.data,
+ op->sym.cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym.auth.digest.data,
+ op->sym.auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym.auth.aad.data,
+ op->sym.auth.aad.length);
#endif
return 0;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..c2c2cca 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,10 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_sym_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_sym_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..84086b1 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_sym_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_sym_op_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index d70fc9a..c0c71b0 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -85,6 +84,7 @@
*/
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
+
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -154,14 +159,16 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
@@ -175,7 +182,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -200,14 +207,17 @@ struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
-#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */
/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
-uint64_t total_packets_dropped = 0, total_packets_tx = 0, total_packets_rx = 0,
- total_packets_enqueued = 0, total_packets_dequeued = 0,
- total_packets_errors = 0;
+uint64_t total_packets_dropped = 0,
+ total_packets_tx = 0,
+ total_packets_rx = 0,
+ total_packets_enqueued = 0,
+ total_packets_dequeued = 0,
+ total_packets_errors = 0;
/* Print out statistics on packets dropped */
static void
@@ -284,20 +294,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym.m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -305,7 +316,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -313,23 +325,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -367,43 +379,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&op->sym, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym.auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym.auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym.auth.digest.length = cparams->digest_length;
+
+ op->sym.auth.data.offset = ipdata_offset;
+ op->sym.auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym.cipher.iv.data = cparams->iv_key.data;
+ op->sym.cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym.cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym.cipher.data.offset = ipdata_offset;
+ op->sym.cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym.m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -425,8 +437,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -435,7 +447,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -495,6 +507,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -555,12 +569,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -589,7 +603,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
portid = qconf->rx_port_list[i];
@@ -603,15 +617,14 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
/* Enqueue packets from Crypto device*/
for (j = 0; j < nb_rx; j++) {
m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ op = rte_crypto_op_alloc(l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
/*
- * If we can't allocate a offload, then drop
+ * If we can't allocate a crypto_op, then drop
* the rest of the burst and dequeue and
* process the packets to free offload structs
*/
- if (unlikely(ol == NULL)) {
+ if (unlikely(op == NULL)) {
for (; j < nb_rx; j++) {
rte_pktmbuf_free(pkts_burst[j]);
port_statistics[portid].dropped++;
@@ -620,24 +633,31 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
}
rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ rte_prefetch0((void *)op);
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m, op, cparams);
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym.m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ rte_prefetch0(rte_pktmbuf_mtod(m,
+ void *));
+
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
@@ -750,26 +770,17 @@ parse_key(struct rte_crypto_key *key __rte_unused,
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- if (strcmp("SHA1", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA1;
- return 0;
- } else if (strcmp("SHA1_HMAC", optarg) == 0) {
+ if (strcmp("SHA1_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
return 0;
- } else if (strcmp("SHA224", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA224;
- return 0;
} else if (strcmp("SHA224_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
return 0;
- } else if (strcmp("SHA256", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
- return 0;
} else if (strcmp("SHA256_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
return 0;
- } else if (strcmp("SHA512", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
+ } else if (strcmp("SHA384_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
return 0;
} else if (strcmp("SHA512_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
@@ -800,7 +811,7 @@ static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
struct option *lgopts, int option_index)
{
- if (strcmp(lgopts[option_index].name, "cdev_type") == 0)
+ if (strcmp(lgopts[option_index].name, "cdev") == 0)
return parse_cryptodev_type(&options->cdev_type, optarg);
else if (strcmp(lgopts[option_index].name, "chain") == 0)
@@ -825,12 +836,10 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
/* Authentication options */
else if (strcmp(lgopts[option_index].name, "auth_algo") == 0)
- return parse_auth_algo(&options->cipher_xform.auth.algo,
- optarg);
+ return parse_auth_algo(&options->auth_xform.auth.algo, optarg);
else if (strcmp(lgopts[option_index].name, "auth_op") == 0)
- return parse_auth_op(&options->cipher_xform.auth.op,
- optarg);
+ return parse_auth_op(&options->auth_xform.auth.op, optarg);
else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
return parse_key(&options->auth_xform.auth.key,
@@ -896,16 +905,16 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
const char *q_arg)
{
char *end = NULL;
- long int n;
+ unsigned long n;
/* parse number string */
- n = strtol(q_arg, &end, 10);
+	n = strtoul(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
n = 0;
if (n >= MAX_TIMER_PERIOD) {
- printf("Warning refresh period specified %ld is greater than "
- "max value %d! using max value",
+ printf("Warning refresh period specified %lu is greater than "
+ "max value %lu! using max value",
n, MAX_TIMER_PERIOD);
n = MAX_TIMER_PERIOD;
}
@@ -925,9 +934,9 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->nb_ports_per_lcore = 1;
options->refresh_period = 10000;
options->single_lcore = 0;
+ options->sessionless = 0;
options->cdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
- options->sessionless = 0;
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
@@ -984,39 +993,6 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
printf("sessionless crypto: %s\n",
options->sessionless ? "enabled" : "disabled");
-#if 0
- options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
-
- /* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
- options->cipher_xform.next = NULL;
-
- options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- generate_random_key(options->ckey_data, sizeof(options->ckey_data));
-
- options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
- options->cipher_xform.cipher.key.length = 16;
-
-
- /* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
- options->auth_xform.next = NULL;
-
- options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
-
- options->auth_xform.auth.add_auth_data_length = 0;
- options->auth_xform.auth.digest_length = 20;
-
- generate_random_key(options->akey_data, sizeof(options->akey_data));
-
- options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
- options->auth_xform.auth.key.length = 20;
-#endif
}
/* Parse the argument given in the command line of the application */
@@ -1030,7 +1006,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
static struct option lgopts[] = {
{ "sessionless", no_argument, 0, 0 },
- { "cdev_type", required_argument, 0, 0 },
+ { "cdev", required_argument, 0, 0 },
{ "chain", required_argument, 0, 0 },
{ "cipher_algo", required_argument, 0, 0 },
@@ -1044,6 +1020,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "iv", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -1361,15 +1338,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
diff --git a/lib/Makefile b/lib/Makefile
index ef172ea..4c5c1b4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..901b82a 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -347,12 +347,15 @@ enum rte_crypto_op_sess_type {
enum rte_crypto_op_status {
RTE_CRYPTO_OP_STATUS_SUCCESS,
/**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
+ RTE_CRYPTO_OP_STATUS_NOT_SUBMITTED,
/**< Operation not yet submitted to a cryptodev */
RTE_CRYPTO_OP_STATUS_ENQUEUED,
/**< Operation is enqueued on device */
RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
/**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**< Operation failed due to invalid session arguments, or in
+ * session-less mode if session creation failed */
RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
/**< Operation failed due to invalid arguments in request */
RTE_CRYPTO_OP_STATUS_ERROR,
@@ -360,20 +363,16 @@ enum rte_crypto_op_status {
};
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
*/
-struct rte_crypto_op {
+struct rte_crypto_sym_op {
enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
union {
struct rte_cryptodev_session *session;
@@ -384,7 +383,7 @@ struct rte_crypto_op {
struct {
struct {
- uint32_t offset;
+ uint16_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -392,7 +391,7 @@ struct rte_crypto_op {
* this location.
*/
- uint32_t length;
+ uint16_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -411,17 +410,68 @@ struct rte_crypto_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint16_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The @ref aad field
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -429,180 +479,398 @@ struct rte_crypto_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint16_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+ * ignored. The @ref aad field should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+ * rte_cryptodev_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint16_t length; /**< Length of additional authenticated data */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
void *user_data;
/**< opaque pointer for user data */
};
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, /**< Symmetric operation */
+};
/**
- * Reset the fields of a crypto operation to their default values.
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API; PMDs should check the type parameter to
+ * verify that the operation is a supported function of the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() calls.
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**< operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_SUBMITTED on allocation from mempool and
+ * should be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by crypto PMD */
+
+ struct rte_mempool *mempool;
+ /**< mempool the crypto op was allocated from */
+
+ union {
+ struct rte_crypto_sym_op sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+};
+
+
+/**
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
+
+ op->m_src = NULL;
+ op->m_dst = NULL;
+}
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_SUBMITTED;
+
+ if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ op->type = type;
+ __rte_crypto_sym_op_reset(&op->sym);
+ }
}
-/** Attach a session to a crypto operation */
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param op crypto operation
+ * @param sess cryptodev session
+ */
static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
struct rte_cryptodev_session *sess)
{
op->session = sess;
op->type = RTE_CRYPTO_OP_WITH_SESSION;
}
+/**
+ * Private data structure belonging to a crypto operation pool. */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param size number of elements in pool
+ * @param cache_size Number of elements for core cache
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned size, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Allocate a raw element from the mempool and return it as a crypto operation
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ *
+ * @returns
+ * - On success a rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+__rte_crypto_op_raw_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ if (unlikely(priv->type != type))
+ return NULL;
+
+ void *buf = NULL;
+
+ if (rte_mempool_get(mempool, &buf) < 0)
+ return NULL;
+
+ return (struct rte_crypto_op *)buf;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = __rte_crypto_op_raw_alloc(mempool, type);
+
+ if (likely(op != NULL))
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf in which to allocate crypto operation in.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation, whose
+ * m_src parameter is set to the mbuf in which the operation was
+ * allocated.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_priv(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /* check that the mbuf's private data size is sufficient to contain a
+ * crypto operation */
+ if (unlikely(m->priv_size < sizeof(struct rte_crypto_op)))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym.m_src = m;
+
+ return op;
+}
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)(op + 1);
+ }
+
+ return NULL;
+}
+/**
+ * Allocate space for crypto xforms in the private data space of the
+ * symmetric crypto operation. This also defaults the crypto xform type and
+ * configures the chaining of the xforms in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_xform *
+rte_crypto_sym_op_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ struct rte_crypto_xform *xform;
+ void *priv_data;
+ uint32_t size;
+
+ size = sizeof(struct rte_crypto_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ op->sym.xform = xform = (struct rte_crypto_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return op->sym.xform;
+}
+
+/**
+ * Free a crypto operation structure.
+ * If the operation has been allocated from a rte_mempool, then the operation
+ * will be returned to the mempool.
+ *
+ * @param op symmetric crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index f09f67e..5475470 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -71,6 +71,15 @@
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
+
+const char *cryptodev_pmd_names[] = {
+ [RTE_CRYPTODEV_NULL_PMD] = CRYPTODEV_NAME_NULL_PMD,
+ [RTE_CRYPTODEV_AESNI_MB_PMD] = CRYPTODEV_NAME_AESNI_MB_PMD,
+ [RTE_CRYPTODEV_QAT_PMD] = CRYPTODEV_NAME_QAT_PMD
+};
+
+const char **rte_cyptodev_names = &cryptodev_pmd_names[0];
+
struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];
@@ -1093,3 +1102,71 @@ rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
return NULL;
}
+
+/** Initialize rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned size, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+ unsigned elt_size = sizeof(struct rte_crypto_op) + priv_size;
+
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (priv->priv_size < priv_size || mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < size) {
+ mp = NULL;
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ size,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL)
+ return NULL;
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..5c088b0 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -67,6 +67,9 @@ enum rte_cryptodev_type {
RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
};
+
+extern const char **rte_cyptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -440,12 +443,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -516,68 +519,72 @@ struct rte_cryptodev_data {
} __rte_cache_aligned;
extern struct rte_cryptodev *rte_cryptodevs;
+
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed symmetric operations from a queue on the crypto
+ * device. The dequeued operation are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
/**
- * Enqueue a burst of packets for processing on a crypto device.
+ * Enqueue a burst of symmetric operations for processing on a crypto device.
*
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * symmetric crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * packets it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all packets have been enqueued.
*
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * Each operation in the *ops* array must have a valid *rte_mbuf* structure
+ * attached, via m_src parameter, which contains the source data which the
+ * crypto operation is to be performed on
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -585,25 +592,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the symmetric crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto device's queue is full or has been filled up, or if invalid
+ * parameters are specified in a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..24e00bb 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
};
\ No newline at end of file
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index f234ac9..def2e33 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
diff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile
deleted file mode 100644
index acdb449..0000000
--- a/lib/librte_mbuf_offload/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c
deleted file mode 100644
index 5c0c9dd..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
deleted file mode 100644
index 4345f06..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ /dev/null
@@ -1,307 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operations
- * parameter's inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbug offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success retruns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload strucutre to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
deleted file mode 100644
index 3d3b06a..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH] cryptodev: API change to rte_crypto_op bursts
2016-01-30 13:07 [dpdk-dev] [PATCH] cryptodev: API change to rte_crypto_op bursts Declan Doherty
@ 2016-02-08 17:50 ` Trahe, Fiona
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 0/2] cryptodev API changes Declan Doherty
1 sibling, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-02-08 17:50 UTC (permalink / raw)
To: Doherty, Declan, dev
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Declan Doherty
> Sent: Saturday, January 30, 2016 1:07 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH] cryptodev: API change to rte_crypto_op bursts
>
> This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
> of rte_crypto_op's rather than the current implementation which operates on
> rte_mbuf bursts, which simplifies the burst processing in the crypto PMDs and the
> use of crypto operations in general.
>
> The changes also separates the symmetric operation parameters from the more
> general operation parameters, this will simplify the integration of
> asymmetric crypto operations in the future.
>
> As well as the changes to the crypto APIs this patch adds functions for managing
> rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
> tests and sample application to work with the modified APIs and finally
> removes the now unused rte_mbuf_offload library.
>
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> ---
//snip//
> +
> +/**
> + * Reset the fields of a symmetric operation to their default values.
> *
> * @param op The crypto operation to be reset.
> */
> static inline void
> -__rte_crypto_op_reset(struct rte_crypto_op *op)
> +__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
> {
> op->type = RTE_CRYPTO_OP_SESSIONLESS;
Maybe rename the type param in struct rte_crypto_sym_op to session_type
and op param in this fn to sym_op?
To avoid confusion with op->type in function below.
> - op->dst.m = NULL;
> - op->dst.offset = 0;
> +
> + op->m_src = NULL;
> + op->m_dst = NULL;
> +}
> +
> +/**
> + * Reset the fields of a crypto operation to their default values.
> + *
> + * @param op The crypto operation to be reset.
> + * @param type The crypto operation type.
> + */
> +static inline void
> +__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type
> type)
> +{
> + op->status = RTE_CRYPTO_OP_STATUS_NOT_SUBMITTED;
> +
> + if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
> + op->type = type;
> + __rte_crypto_sym_op_reset(&op->sym);
> + }
> }
>
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v2 0/2] cryptodev API changes
2016-01-30 13:07 [dpdk-dev] [PATCH] cryptodev: API change to rte_crypto_op bursts Declan Doherty
2016-02-08 17:50 ` Trahe, Fiona
@ 2016-02-19 11:01 ` Declan Doherty
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
` (3 more replies)
1 sibling, 4 replies; 62+ messages in thread
From: Declan Doherty @ 2016-02-19 11:01 UTC (permalink / raw)
To: dev
This patch set separates the symmetric crypto operations from generic operations
and then modifies the cryptodev burst API to accept bursts of rte_crypto_op
rather than rte_mbufs.
This patch set is dependent on the following bug fixes patches:
aesni_mb: strict-aliasing rule compilation fix
(http://dpdk.org/ml/archives/dev/2016-February/033193.html)
qat:fix build on 32-bit systems
(http://dpdk.org/ml/archives/dev/2016-February/033442.html)
aesni_mb: fix wrong return value
(http://dpdk.org/ml/archives/dev/2016-February/033193.html)
Various fixes for L2fwd-crypto
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 890 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 7 -
config/common_linuxapp | 11 +-
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 154 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 281 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 802 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 113 ++-
lib/librte_cryptodev/rte_cryptodev.h | 183 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 1 +
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 307 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2114 insertions(+), 2005 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v2 1/2] cryptodev: API tidy and changes to support future extensions
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 0/2] cryptodev API changes Declan Doherty
@ 2016-02-19 11:01 ` Declan Doherty
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
` (2 subsequent siblings)
3 siblings, 0 replies; 62+ messages in thread
From: Declan Doherty @ 2016-02-19 11:01 UTC (permalink / raw)
To: dev
From: Fiona Trahe <fiona.trahe@intel.com>
This patch splits symmetric specific definitions and functions away from the
common crypto APIs to facilitate the future extension and expansion of the
cryptodev framework, in order to allow asymmetric crypto operations to be
introduced at a later date, as well as to clean the logical structure of the
public includes. The patch also introduces the _sym prefix to symmetric
specific structure and functions to improve clarity in the API.
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
app/test/test_cryptodev.c | 164 +++---
app/test/test_cryptodev_perf.c | 79 +--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 44 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 6 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 51 +-
drivers/crypto/qat/qat_crypto.h | 10 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 33 +-
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 563 +------------------
lib/librte_cryptodev/rte_crypto_sym.h | 613 +++++++++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 39 +-
lib/librte_cryptodev/rte_cryptodev.h | 80 ++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_mbuf_offload/rte_mbuf_offload.h | 22 +-
16 files changed, 912 insertions(+), 837 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 62f8fb0..951b443 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,13 +57,13 @@ struct crypto_testsuite_params {
};
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
struct rte_mbuf_offload *ol;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -78,7 +78,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_param);
@@ -165,7 +165,8 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -220,7 +221,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
@@ -275,7 +276,7 @@ ut_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
ts_params->conf.session_mp.nb_objs =
- (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+ (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
DEFAULT_NUM_OPS_INFLIGHT :
DEFAULT_NUM_OPS_INFLIGHT;
@@ -319,7 +320,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
ut_params->sess);
ut_params->sess = NULL;
}
@@ -464,7 +465,7 @@ test_queue_pair_descriptor_setup(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf), "Failed to configure cryptodev %u",
@@ -766,7 +767,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -776,7 +777,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -786,20 +787,21 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -862,18 +864,18 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+ TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
ut_params->ol, 2),
"failed to allocate space for crypto transforms");
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -882,7 +884,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -958,7 +960,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA1);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -967,7 +969,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -977,13 +979,14 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -991,7 +994,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1066,7 +1069,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1075,7 +1078,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1085,13 +1088,14 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1099,7 +1103,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1168,7 +1172,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA256);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1177,7 +1181,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1187,13 +1191,14 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1201,7 +1206,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1282,7 +1287,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1291,7 +1296,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1301,15 +1306,16 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1317,7 +1323,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1371,7 +1377,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
@@ -1386,8 +1392,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
"Failed to create session params");
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
@@ -1400,7 +1407,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
{
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1409,7 +1416,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1422,7 +1429,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params)
{
@@ -1442,7 +1449,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1450,7 +1457,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1522,7 +1529,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1531,7 +1538,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1541,13 +1548,14 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1555,7 +1563,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)
rte_pktmbuf_prepend(ut_params->ibuf,
@@ -1615,7 +1623,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_AES_XCBC);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1624,7 +1632,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1634,13 +1642,14 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1648,7 +1657,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
@@ -1752,7 +1761,7 @@ test_multi_session(void)
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_cryptodev_info dev_info;
- struct rte_cryptodev_session **sessions;
+ struct rte_cryptodev_sym_session **sessions;
uint16_t i;
@@ -1761,12 +1770,13 @@ test_multi_session(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
- dev_info.max_nb_sessions) + 1, 0);
+ sessions = rte_malloc(NULL,
+ (sizeof(struct rte_cryptodev_sym_session *) *
+ dev_info.sym.max_nb_sessions) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.max_nb_sessions; i++) {
- sessions[i] = rte_cryptodev_session_create(
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(sessions[i],
@@ -1781,13 +1791,13 @@ test_multi_session(void)
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.max_nb_sessions; i++)
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
sessions[i]);
rte_free(sessions);
@@ -1806,7 +1816,7 @@ test_not_in_place_crypto(void)
/* Create multiple crypto sessions*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0], &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1828,7 +1838,7 @@ test_not_in_place_crypto(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1836,7 +1846,7 @@ test_not_in_place_crypto(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1962,7 +1972,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static struct test_command cryptodev_qat_cmd = {
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 728bcf0..42dd9bc 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -63,12 +63,12 @@ struct crypto_testsuite_params {
#define MAX_NUM_OF_OPS_PER_UT (128)
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
@@ -127,7 +127,7 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -179,7 +179,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
@@ -252,7 +252,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess)
- rte_cryptodev_session_free(ts_params->dev_id,
+ rte_cryptodev_sym_session_free(ts_params->dev_id,
ut_params->sess);
/* free crypto operation structure */
@@ -1713,7 +1713,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1723,7 +1723,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1733,7 +1733,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1741,7 +1741,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < num_to_submit ; b++) {
tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ (const char *)data_params[0].expected.ciphertext,
data_params[0].length, 0);
TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
@@ -1753,12 +1753,12 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
DIGEST_BYTE_LENGTH_SHA256);
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+ ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
@@ -1814,7 +1814,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1824,10 +1825,13 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
total_cycles += end_cycles - start_cycles;
}
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1876,7 +1880,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1885,7 +1889,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1895,7 +1899,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1928,12 +1932,12 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1957,15 +1961,19 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, tx_mbufs,
+ ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
+ < DEFAULT_BURST_SIZE) ?
+ DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
+ DEFAULT_BURST_SIZE);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1973,10 +1981,13 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num, 0,
rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1987,7 +1998,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
mmps = (double)num_received*mhz/(end_cycles - start_cycles);
throughput = mmps*data_params[index].length*8;
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
+ data_params[index].length, num_sent, num_received);
printf("\t%.2f\t%u", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
@@ -2044,7 +2055,7 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index c7d884c..61d93cd 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -109,7 +109,7 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
@@ -119,12 +119,12 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
@@ -134,11 +134,11 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
@@ -196,11 +196,11 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
@@ -259,10 +259,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,11 +296,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
@@ -313,7 +313,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
return NULL;
sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
sess, crypto_op->xform) != 0)) {
@@ -339,7 +339,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
@@ -432,14 +433,14 @@ static struct rte_mbuf *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -459,7 +460,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
@@ -511,7 +512,8 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
qp->qp_stats.enqueue_err_count++;
goto flush_jobs;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index e43b898..3cd9990 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -108,7 +108,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->dev_type = dev->dev_type;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->max_nb_sessions = internals->max_nb_sessions;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
}
}
@@ -243,7 +243,7 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
/** Configure a aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform, void *sess)
{
struct aesni_mb_private *internals = dev->data->dev_private;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..ab70c15 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -218,7 +218,7 @@ struct aesni_mb_session {
extern int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform);
+ const struct rte_crypto_sym_xform *xform);
/** device specific operations function pointer structure */
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 828756b..e7b9027 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,37 +89,37 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
}
static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
if (xform->next == NULL)
return -1;
/* Cipher Only */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
/* Authentication Only */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
/* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
/* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
return -1;
}
static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return &xform->auth;
xform = xform->next;
@@ -129,10 +129,10 @@ qat_get_auth_xform(struct rte_crypto_xform *xform)
}
static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return &xform->cipher;
xform = xform->next;
@@ -144,7 +144,7 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform)
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -261,7 +261,7 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
auth_xform->digest_length))
goto error_out;
- return (struct rte_cryptodev_session *)session;
+ return (struct rte_crypto_sym_session *)session;
error_out:
rte_mempool_put(internals->sess_mp, session);
@@ -275,7 +275,7 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
register struct qat_queue *queue;
@@ -327,7 +327,8 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct rte_mbuf_offload *ol;
struct qat_queue *queue;
@@ -343,12 +344,13 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_pkts) {
rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+ ol = rte_pktmbuf_offload_get(rx_mbuf,
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ ol->op.crypto.status =
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
@@ -384,20 +386,21 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
"to (%p) mbuf.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests mbuf (%p) is sessionless.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(ol->op.crypto.session->type
+ != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
@@ -520,8 +523,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
- info->max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..e9f71fe 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,16 +109,18 @@ qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
extern void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform, void *session_private);
extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..85700fc 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,11 +89,11 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->pci_dev->addr.devid,
cryptodev->pci_dev->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index a950b74..93de786 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,13 +124,13 @@ struct l2fwd_crypto_options {
enum l2fwd_crypto_xform_chain xform_chain;
- struct rte_crypto_xform cipher_xform;
+ struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_key iv_key;
+ struct rte_crypto_sym_key iv_key;
uint8_t ivkey_data[16];
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform auth_xform;
uint8_t akey_data[128];
};
@@ -142,8 +142,11 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct rte_crypto_key iv_key;
- struct rte_cryptodev_session *session;
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv;
+ struct rte_cryptodev_sym_session *session;
};
/** lcore configuration */
@@ -372,7 +375,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
/* Append space for digest to end of packet */
ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -474,11 +477,11 @@ generate_random_key(uint8_t *key, unsigned length)
key[i] = rand() % 0xff;
}
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
uint8_t cdev_id)
{
- struct rte_crypto_xform *first_xform;
+ struct rte_crypto_sym_xform *first_xform;
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
@@ -489,7 +492,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options,
}
/* Setup Cipher Parameters */
- return rte_cryptodev_session_create(cdev_id, first_xform);
+ return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}
static void
@@ -610,7 +613,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
m = pkts_burst[j];
ol = rte_pktmbuf_offload_alloc(
l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
/*
* If we can't allocate a offload, then drop
* the rest of the burst and dequeue and
@@ -689,7 +692,7 @@ parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg)
*type = RTE_CRYPTODEV_AESNI_MB_PMD;
return 0;
} else if (strcmp("QAT", optarg) == 0) {
- *type = RTE_CRYPTODEV_QAT_PMD;
+ *type = RTE_CRYPTODEV_QAT_SYM_PMD;
return 0;
}
@@ -745,7 +748,7 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_key *key __rte_unused,
+parse_key(struct rte_crypto_sym_key *key __rte_unused,
unsigned length __rte_unused, char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
@@ -982,7 +985,7 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
switch (options->cdev_type) {
case RTE_CRYPTODEV_AESNI_MB_PMD:
printf("cryptodev type: AES-NI MB PMD\n"); break;
- case RTE_CRYPTODEV_QAT_PMD:
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
printf("cryptodev type: QAT PMD\n"); break;
default:
break;
@@ -1179,7 +1182,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports)
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
int retval;
- if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+ if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
if (rte_cryptodev_count() < nb_ports)
return -1;
} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 81fa3fc..0d59229 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -45,6 +45,7 @@ SRCS-y += rte_cryptodev.c
# export include files
SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..df0c0b8 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,572 +36,15 @@
/**
* @file rte_crypto.h
*
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
*
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
- RTE_CRYPTO_CIPHER_NULL = 1,
- /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
- RTE_CRYPTO_CIPHER_3DES_CBC,
- /**< Triple DES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_3DES_CTR,
- /**< Triple DES algorithm in CTR mode */
- RTE_CRYPTO_CIPHER_3DES_ECB,
- /**< Triple DES algorithm in ECB mode */
-
- RTE_CRYPTO_CIPHER_AES_CBC,
- /**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
- RTE_CRYPTO_CIPHER_AES_CTR,
- /**< AES algorithm in Counter mode */
- RTE_CRYPTO_CIPHER_AES_ECB,
- /**< AES algorithm in ECB mode */
- RTE_CRYPTO_CIPHER_AES_F8,
- /**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* element of the
- * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_setup_data* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation.
- */
- RTE_CRYPTO_CIPHER_AES_XTS,
- /**< AES algorithm in XTS mode */
-
- RTE_CRYPTO_CIPHER_ARC4,
- /**< (A)RC4 cipher algorithm */
-
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- /**< Kasumi algorithm in F8 mode */
-
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- /**< SNOW3G algorithm in UEA2 mode */
-
- RTE_CRYPTO_CIPHER_ZUC_EEA3
- /**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- /**< Encrypt cipher operation */
- RTE_CRYPTO_CIPHER_OP_DECRYPT
- /**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- * use to create a session.
- */
-struct rte_crypto_cipher_xform {
- enum rte_crypto_cipher_operation op;
- /**< This parameter determines if the cipher operation is an encrypt or
- * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
- * only encrypt operations are valid.
- */
- enum rte_crypto_cipher_algorithm algo;
- /**< Cipher algorithm */
-
- struct rte_crypto_key key;
- /**< Cipher key
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
- * point to a concatenation of the AES encryption key followed by a
- * keymask. As per RFC3711, the keymask should be padded with trailing
- * bytes to match the length of the encryption key used.
- *
- * For AES-XTS mode of operation, two keys must be provided and
- * key.data must point to the two keys concatenated together (Key1 ||
- * Key2). The cipher key length will contain the total size of both
- * keys.
- *
- * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
- * 192 bits (24 bytes) or 256 bits (32 bytes).
- *
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
- * should be set to the combined length of the encryption key and the
- * keymask. Since the keymask and the encryption key are the same size,
- * key.length should be set to 2 x the AES encryption key length.
- *
- * For the AES-XTS mode of operation:
- * - Two keys must be provided and key.length refers to total length of
- * the two keys.
- * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size.
- **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
- RTE_CRYPTO_AUTH_NULL = 1,
- /**< NULL hash algorithm. */
-
- RTE_CRYPTO_AUTH_AES_CBC_MAC,
- /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_CMAC,
- /**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- /**< AES XCBC algorithm. */
-
- RTE_CRYPTO_AUTH_KASUMI_F9,
- /**< Kasumi algorithm in F9 mode. */
-
- RTE_CRYPTO_AUTH_MD5,
- /**< MD5 algorithm */
- RTE_CRYPTO_AUTH_MD5_HMAC,
- /**< HMAC using MD5 algorithm */
-
- RTE_CRYPTO_AUTH_SHA1,
- /**< 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA1_HMAC,
- /**< HMAC using 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224,
- /**< 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224_HMAC,
- /**< HMAC using 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256,
- /**< 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256_HMAC,
- /**< HMAC using 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384,
- /**< 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384_HMAC,
- /**< HMAC using 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512,
- /**< 512 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512_HMAC,
- /**< HMAC using 512 bit SHA algorithm. */
-
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- /**< SNOW3G algorithm in UIA2 mode. */
-
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- /**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
- RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
- RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
- enum rte_crypto_auth_operation op;
- /**< Authentication operation type */
- enum rte_crypto_auth_algorithm algo;
- /**< Authentication algorithm selection */
-
- struct rte_crypto_key key; /**< Authentication key data.
- * The authentication key length MUST be less than or equal to the
- * block size of the algorithm. It is the callers responsibility to
- * ensure that the key length is compliant with the standard being used
- * (for example RFC 2104, FIPS 198a).
- */
-
- uint32_t digest_length;
- /**< Length of the digest to be returned. If the verify option is set,
- * this specifies the length of the digest to be compared for the
- * session.
- *
- * If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_session_create* or by the
- * *rte_cryptodev_enqueue_burst* if using session-less APIs.
- */
-
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in the message length to hash field of
- * the rte_crypto_op_data structure.
- */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
- RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
- RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
- struct rte_crypto_xform *next; /**< next xform in chain */
-
- enum rte_crypto_xform_type type; /**< xform type */
- union {
- struct rte_crypto_auth_xform auth;
- /**< Authentication / hash xform */
- struct rte_crypto_cipher_xform cipher;
- /**< Cipher xform */
- };
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
- RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
-
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
- enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
-
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
-
- union {
- struct rte_cryptodev_session *session;
- /**< Handle for the initialised session context */
- struct rte_crypto_xform *xform;
- /**< Session-less API crypto operation parameters */
- };
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- */
- } to_cipher; /**< Data offsets and length for ciphering */
-
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field specifies the start
- * of the AAD data in the source buffer.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field specifies the length of
- * the AAD data in the source buffer.
- */
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
-
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param op The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
- op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
- struct rte_cryptodev_session *sess)
-{
- op->session = sess;
- op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 0000000..cb2b8f6
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,613 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CCM,
+ /**< AES algorithm in CCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation
+ */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_GCM,
+ /**< AES algorithm in GCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< Kasumi algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ /**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * use to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * For AES-XTS mode of operation, two keys must be provided and
+ * key.data must point to the two keys concatenated together (Key1 ||
+ * Key2). The cipher key length will contain the total size of both
+ * keys.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the CCM mode of operation, the only supported key length is 128
+ * bits (16 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ **/
+};
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CCM,
+ /**< AES algorithm in CCM mode. This is an authenticated cipher. When
+ * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+ * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+ * set up the related rte_crypto_cipher_xform structure in the
+ * session context or the corresponding parameter in the crypto
+ * operation data structures op_params parameter MUST be set for a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GCM,
+ /**< AES algorithm in GCM mode. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< Kasumi algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transforms. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the callers responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ uint32_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated. If the value is greater than the
+ * maximum length allowed by the hash then an error will be generated
+ * by *rte_cryptodev_sym_session_create* or by the
+ * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ */
+
+ uint32_t add_auth_data_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * The maximum permitted value is 240 bytes, unless otherwise specified
+ * below.
+ *
+ * This field must be specified when the hash algorithm is one of the
+ * following:
+ *
+ * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+ * length of the IV (which should be 16).
+ *
+ * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+ * the length of the Additional Authenticated Data (called A, in NIST
+ * SP800-38D).
+ *
+ * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+ * the length of the associated data (called A, in NIST SP800-38C).
+ * Note that this does NOT include the length of any padding, or the
+ * 18 bytes reserved at the start of the above field to store the
+ * block B0 and the encoded length. The maximum permitted value in
+ * this case is 222 bytes.
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+ * this field is not used and should be set to 0. Instead the length
+ * of the AAD data is specified in the message length to hash field of
+ * the rte_crypto_sym_op structure.
+ */
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required, multiple transforms
+ * can be chained together to specify a chain transforms such as authentication
+ * then cipher, or cipher then authentication. Each transform structure can
+ * hold a single transform, the type field is used to specify which transform
+ * is contained within the union
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type;
+ /**< xform type */
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ };
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+ RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
+ /**< Operation not yet submitted to a cryptodev */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
+ * call for performing cipher, hash, or a combined hash and cipher operations.
+ */
+struct rte_crypto_sym_op {
+ enum rte_crypto_sym_op_sess_type type;
+ enum rte_crypto_op_status status;
+
+ struct {
+ struct rte_mbuf *m; /**< Destination mbuf */
+ uint8_t offset; /**< Data offset */
+ } dst;
+
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing, specified
+ * as number of bytes from start of data in the source
+ * buffer. The result of the cipher operation will be
+ * written back into the output buffer starting at
+ * this location.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ * if a block cipher is being used. This is also the
+ * same as the result length.
+ *
+ * @note
+ * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+ * this value should not include the length of the
+ * padding or the length of the MAC; the driver will
+ * compute the actual number of bytes over which the
+ * encryption will occur, which will include these
+ * values.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+ * field should be set to 0.
+ */
+ } to_cipher; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+ * mode of operation, this field specifies the start
+ * of the AAD data in the source buffer.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+ * of operation, this field specifies the length of
+ * the AAD data in the source buffer.
+ */
+ } to_hash; /**< Data offsets and length for authentication */
+ } data; /**< Details of data to be operated on */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+ * Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length is 96
+ * bits) or J0 (for other sizes), where J0 is as defined by
+ * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+ * needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the nonce
+ * should be written starting at &iv[1] (to allow space for the
+ * implementation to write in the flags in the first byte).
+ * Note that a full 16 bytes should be allocated, even though
+ * the length field will have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+ * 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD be
+ * 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ size_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the length of the
+ * IV (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length of the
+ * counter (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+ * which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce, which can
+ * be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result should be
+ * inserted (in the case of digest generation) or where the
+ * purported digest exists (in the case of digest
+ * verification).
+ *
+ * At session creation time, the client specified the digest
+ * result length with the digest_length member of the @ref
+ * rte_crypto_auth_xform structure. For physical crypto
+ * devices the caller must allocate at least digest_length of
+ * physically contiguous memory at this location.
+ *
+ * For digest generation, the digest result will overwrite
+ * any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is understood
+ * to be in the destination buffer for digest generation, and
+ * in the source buffer for digest verification. The location
+ * of the digest result in this case is immediately following
+ * the region over which the digest is computed.
+ */
+ phys_addr_t phys_addr; /**< Physical address of digest */
+ uint32_t length; /**< Length of digest */
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD) needed for
+ * authenticated cipher mechanisms (CCM and GCM), and to the IV
+ * for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is set up
+ * for the session in the @ref rte_crypto_auth_xform structure
+ * as part of the @ref rte_cryptodev_sym_session_create function
+ * call. This length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+ * caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset of one
+ * byte into the array, leaving room for the implementation
+ * to write in the flags to the first byte.
+ *
+ * - the additional authentication data itself should be
+ * written starting at an offset of 18 bytes into the array,
+ * leaving room for the length encoding in the first two
+ * bytes of the second block.
+ *
+ * - the array should be big enough to hold the above fields,
+ * plus any padding to round this up to the nearest multiple
+ * of the block size (16 bytes). Padding will be added by
+ * the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set to 0.
+ * Instead the AAD data should be placed in the source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint32_t length; /**< Length of additional authenticated data */
+ } additional_auth;
+ /**< Additional authentication parameters */
+
+ struct rte_mempool *pool;
+ /**< mempool used to allocate crypto op */
+
+ void *user_data;
+ /**< opaque pointer for user data */
+};
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+ op->dst.m = NULL;
+ op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ op->session = sess;
+ op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 1106888..f32ddd4 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -584,8 +584,8 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
@@ -622,8 +622,10 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
- config->session_mp.cache_size, config->socket_id);
+ return rte_cryptodev_sym_session_pool_create(dev,
+ config->session_mp.nb_objs,
+ config->session_mp.cache_size,
+ config->socket_id);
}
@@ -953,12 +955,12 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
void *opaque_arg,
void *_sess,
__rte_unused unsigned i)
{
- struct rte_cryptodev_session *sess = _sess;
+ struct rte_cryptodev_sym_session *sess = _sess;
struct rte_cryptodev *dev = opaque_arg;
memset(sess, 0, mp->elt_size);
@@ -972,8 +974,8 @@ rte_crypto_session_init(struct rte_mempool *mp,
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned priv_sess_size;
@@ -993,7 +995,7 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return -ENOMEM;
}
- unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+ unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
priv_sess_size;
dev->data->session_pool = rte_mempool_lookup(mp_name);
@@ -1017,7 +1019,8 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
0, /* private data size */
NULL, /* obj initialization constructor */
NULL, /* obj initialization constructor arg */
- rte_crypto_session_init, /* obj constructor */
+ rte_cryptodev_sym_session_init,
+ /**< obj constructor*/
dev, /* obj constructor arg */
socket_id, /* socket id */
0); /* flags */
@@ -1032,11 +1035,12 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return 0;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
void *_sess;
if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
@@ -1052,7 +1056,7 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return NULL;
}
- sess = (struct rte_cryptodev_session *)_sess;
+ sess = (struct rte_cryptodev_sym_session *)_sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1068,8 +1072,9 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return sess;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..732e2b9 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,14 +57,14 @@ extern "C" {
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
+/**< Intel QAT SYM PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
};
/* Logging Macros */
@@ -99,8 +99,11 @@ struct rte_cryptodev_info {
unsigned max_nb_queue_pairs;
/**< Maximum number of queues pairs supported by device. */
- unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device. */
+ } sym;
};
#define RTE_CRYPTODEV_DETACHED (0)
@@ -130,17 +133,6 @@ struct rte_cryptodev_qp_conf {
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
enum rte_cryptodev_event_type event, void *cb_arg);
-#ifdef RTE_CRYPTODEV_PERF
-/**
- * Crypto Device performance counter statistics structure. This structure is
- * used for RDTSC counters for measuring crypto operations.
- */
-struct rte_cryptodev_perf_stats {
- uint64_t t_accumlated; /**< Accumulated time processing operation */
- uint64_t t_min; /**< Max time */
- uint64_t t_max; /**< Min time */
-};
-#endif
/** Crypto Device statistics */
struct rte_cryptodev_stats {
@@ -153,29 +145,8 @@ struct rte_cryptodev_stats {
/**< Total error count on operations enqueued */
uint64_t dequeue_err_count;
/**< Total error count on operations dequeued */
+};
-#ifdef RTE_CRYPTODEV_DETAILED_STATS
- struct {
- uint64_t encrypt_ops; /**< Count of encrypt operations */
- uint64_t encrypt_bytes; /**< Number of bytes encrypted */
-
- uint64_t decrypt_ops; /**< Count of decrypt operations */
- uint64_t decrypt_bytes; /**< Number of bytes decrypted */
- } cipher; /**< Cipher operations stats */
-
- struct {
- uint64_t generate_ops; /**< Count of generate operations */
- uint64_t bytes_hashed; /**< Number of bytes hashed */
-
- uint64_t verify_ops; /**< Count of verify operations */
- uint64_t bytes_verified;/**< Number of bytes verified */
- } hash; /**< Hash operations stats */
-#endif
-
-#ifdef RTE_CRYPTODEV_PERF
- struct rte_cryptodev_perf_stats op_perf; /**< Operations stats */
-#endif
-} __rte_cache_aligned;
/**
* Create a virtual crypto device
@@ -607,6 +578,23 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
}
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+ struct {
+ uint8_t dev_id;
+ /**< Device Id */
+ enum rte_cryptodev_type type;
+ /**< Crypto Device type session created on */
+ struct rte_mempool *mp;
+ /**< Mempool session allocated from */
+ } __rte_aligned(8);
+ /**< Public symmetric session details */
+
+ char _private[0];
+ /**< Private session material */
+};
+
+
/**
* Initialise a session for symmetric cryptographic operations.
*
@@ -628,9 +616,9 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
* @return
* Pointer to the created session or NULL
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
- struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform);
/**
@@ -638,15 +626,15 @@ rte_cryptodev_session_create(uint8_t dev_id,
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
- * *rte_cryptodev_session_create*.
+ * *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
- struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *session);
#ifdef __cplusplus
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 8270afa..7d049ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,11 +56,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-struct rte_cryptodev_stats;
-struct rte_cryptodev_info;
-struct rte_cryptodev_qp_conf;
-
-enum rte_cryptodev_event_type;
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define RTE_PMD_DEBUG_TRACE(...) \
@@ -80,7 +75,6 @@ struct rte_cryptodev_session {
};
struct rte_cryptodev_driver;
-struct rte_cryptodev;
/**
* Initialisation function of a crypto driver invoked for each matching
@@ -364,7 +358,7 @@ typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
* - On success returns a pointer to a rte_mempool
* - On failure returns a NULL pointer
*/
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
struct rte_cryptodev *dev, unsigned nb_objs,
unsigned obj_cache_size, int socket_id);
@@ -378,7 +372,7 @@ typedef int (*cryptodev_create_session_pool_t)(
* - On success returns the size of the session structure for device
* - On failure returns 0
*/
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
@@ -392,7 +386,7 @@ typedef unsigned (*cryptodev_get_session_private_size_t)(
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
void *session_private);
/**
@@ -406,14 +400,14 @@ typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
/**
* Free Crypto session.
* @param session Cryptodev session structure to free
*/
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
@@ -427,9 +421,9 @@ struct rte_cryptodev_ops {
cryptodev_info_get_t dev_infos_get; /**< Get device info. */
cryptodev_stats_get_t stats_get;
- /**< Get generic device statistics. */
+ /**< Get device statistics. */
cryptodev_stats_reset_t stats_reset;
- /**< Reset generic device statistics. */
+ /**< Reset device statistics. */
cryptodev_queue_pair_setup_t queue_pair_setup;
/**< Set up a device queue pair. */
@@ -442,13 +436,13 @@ struct rte_cryptodev_ops {
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return private session. */
- cryptodev_initialize_session_t session_initialize;
+ cryptodev_sym_initialize_session_t session_initialize;
/**< Initialization function for private session data */
- cryptodev_configure_session_t session_configure;
+ cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
- cryptodev_free_session_t session_clear;
+ cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
};
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
index 4345f06..926ab64 100644
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -64,7 +64,7 @@
enum rte_mbuf_ol_op_type {
RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
/**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
+ RTE_PKTMBUF_OL_CRYPTO_SYM
/**< Crypto offload operation */
};
@@ -81,7 +81,7 @@ struct rte_mbuf_offload {
enum rte_mbuf_ol_op_type type; /**< offload type */
union {
- struct rte_crypto_op crypto; /**< Crypto operation */
+ struct rte_crypto_sym_op crypto; /**< Crypto operation */
} op;
};
@@ -191,8 +191,8 @@ __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
ol->type = type;
switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
+ case RTE_PKTMBUF_OL_CRYPTO_SYM:
+ __rte_crypto_sym_op_reset(&ol->op.crypto); break;
default:
break;
}
@@ -275,24 +275,24 @@ __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
* - On success returns pointer to first crypto xform in crypto operations chain
* - On failure returns NULL
*/
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
unsigned nb_xforms)
{
- struct rte_crypto_xform *xform;
+ struct rte_crypto_sym_xform *xform;
void *priv_data;
uint16_t size;
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
if (priv_data == NULL)
return NULL;
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+ ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
} while (xform);
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 0/2] cryptodev API changes Declan Doherty
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
@ 2016-02-19 11:01 ` Declan Doherty
2016-02-22 11:17 ` Trahe, Fiona
` (2 more replies)
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
2016-03-15 7:48 ` [dpdk-dev] [PATCH v2 " Cao, Min
3 siblings, 3 replies; 62+ messages in thread
From: Declan Doherty @ 2016-02-19 11:01 UTC (permalink / raw)
To: dev
This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
of rte_crypto_op's rather than the current implementation which operates on
rte_mbuf bursts; this simplifies the burst processing in the crypto PMDs and the
use of crypto operations in general.
This change set also continues the separation of the symmetric operation parameters
from the more general operation parameters, this will simplify the integration of
asymmetric crypto operations in the future.
As well as the changes to the crypto APIs this patch adds functions for managing
rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
tests and sample application to work with the modified APIs.
Finally this change set removes the now unused rte_mbuf_offload library.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 800 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 253 +++----
config/common_bsdapp | 7 -
config/common_linuxapp | 11 +-
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 171 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 2 +-
drivers/crypto/qat/qat_crypto.c | 125 ++--
drivers/crypto/qat/qat_crypto.h | 12 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 4 +-
examples/l2fwd-crypto/main.c | 264 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/rte_crypto.h | 345 +++++++++
lib/librte_cryptodev/rte_crypto_sym.h | 377 +++++-----
lib/librte_cryptodev/rte_cryptodev.c | 74 ++
lib/librte_cryptodev/rte_cryptodev.h | 107 +--
lib/librte_cryptodev/rte_cryptodev_version.map | 1 +
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 307 --------
.../rte_mbuf_offload_version.map | 7 -
25 files changed, 1543 insertions(+), 1509 deletions(-)
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index 628bc05..8d84dda 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -222,10 +222,6 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 951b443..29e4b29 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -48,7 +47,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,8 +61,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_mbuf_offload *ol;
- struct rte_crypto_sym_op *op;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -104,7 +102,7 @@ setup_test_string(struct rte_mempool *mpool,
return m;
}
-#if HEX_DUMP
+#ifdef HEX_DUMP
static void
hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
{
@@ -112,27 +110,29 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
-#if HEX_DUMP
+#ifdef HEX_DUMP
hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
-#if HEX_DUMP
+#ifdef HEX_DUMP
if (obuf)
hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -162,13 +162,14 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -253,10 +254,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -326,8 +326,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -680,76 +680,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -776,7 +777,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -786,59 +786,65 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ /* Create crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ /* Set crypto operation authentication parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ /* Set crypto operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -863,60 +869,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -985,42 +997,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* attach symmetric crypto session to crypto operations */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1088,47 +1106,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1197,42 +1219,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1244,7 +1271,6 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
return TEST_SUCCESS;
}
@@ -1312,43 +1338,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1367,7 +1396,6 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
DIGEST_BYTE_LENGTH_SHA512,
"Generated digest data not as expected");
-
return TEST_SUCCESS;
}
@@ -1424,6 +1452,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->auth_xform.auth.key.data = hmac_sha512_key;
ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
+
return TEST_SUCCESS;
}
@@ -1448,43 +1477,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1497,8 +1529,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
-
return TEST_SUCCESS;
}
@@ -1524,10 +1554,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
catch_22_quote, QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
@@ -1548,56 +1574,75 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Set operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ /* Set operation authentication parameters */
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->m_src,
+ CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ rte_pktmbuf_mtod_offset(
+ ut_params->op->sym->m_src, uint8_t *,
+ CIPHER_IV_LENGTH_AES_CBC +
+ QUOTE_512_BYTES),
catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC,
+ gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
+ TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC :
+ DIGEST_BYTE_LENGTH_AES_XCBC,
"Generated digest data not as expected");
return TEST_SUCCESS;
@@ -1614,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC);
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
@@ -1648,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf, QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ rte_memcpy(sym_op->auth.digest.data,
+ catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
+ DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1837,50 +1894,49 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+ sym_op->m_dst = dst_m;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
-
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process op obuf");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 034393e..c84ba42 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -32,8 +32,6 @@
#ifndef TEST_CRYPTODEV_H_
#define TEST_CRYPTODEV_H_
-#define HEX_DUMP 0
-
#define FALSE 0
#define TRUE 1
@@ -47,8 +45,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
@@ -62,7 +61,9 @@
#define AES_XCBC_MAC_KEY_SZ (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 (12)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224 (14)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC (12)
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 42dd9bc..b3f4fd9 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -50,7 +49,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -68,8 +67,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_op *op;
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +77,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +90,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
@@ -113,23 +111,24 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -256,8 +255,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1698,11 +1697,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1739,46 +1739,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[0].length;
+
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +1793,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1814,9 +1818,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1824,15 +1827,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1846,16 +1849,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1865,11 +1861,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+ double throughput, mmps;
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1908,7 +1907,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+ "Mrps\tThoughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1918,63 +1917,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+ struct rte_crypto_op *op = rte_crypto_op_alloc(
+ ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[index].length;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)
+ rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num,
- 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
- < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
- DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -1982,37 +1988,34 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym->m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
diff --git a/config/common_bsdapp b/config/common_bsdapp
index 696382c..157f9aa 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -357,13 +357,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index f1638db..e2dc636 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -330,7 +330,7 @@ CONFIG_RTE_CRYPTODEV_NAME_LEN=64
#
# Compile PMD for QuickAssist based devices
#
-CONFIG_RTE_LIBRTE_PMD_QAT=n
+CONFIG_RTE_LIBRTE_PMD_QAT=y
CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_INIT=n
CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_TX=n
CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_RX=n
@@ -344,7 +344,7 @@ CONFIG_RTE_QAT_PMD_MAX_NB_SESSIONS=2048
#
# Compile PMD for AESNI backed device
#
-CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y
CONFIG_RTE_LIBRTE_PMD_AESNI_MB_DEBUG=n
CONFIG_RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS=8
CONFIG_RTE_AESNI_MB_PMD_MAX_NB_SESSIONS=2048
@@ -373,13 +373,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 7a91001..f626386 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 61d93cd..5b26444 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -37,7 +37,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
@@ -296,16 +295,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
void *_sess = NULL;
@@ -316,7 +315,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0)) {
+ sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -338,12 +337,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_sym_op *c_op,
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -372,49 +373,65 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL) {
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+ return NULL;
+ }
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ get_digest_byte_length(job->hash_alg));
+
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library current only support returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
@@ -429,43 +446,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_sym_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
}
- return m;
+ return op;
}
/**
@@ -481,16 +496,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_pkts, (void *)op);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -499,53 +514,53 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
JOB_AES_HMAC *job = NULL;
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i],
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+ if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ MB_LOG_ERR("PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", ops[i]);
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
-
- sess = get_session(qp, &ol->op.crypto);
+#endif
+ sess = get_session(_qp, ops[i]);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(_qp, ops[i], sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
/* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ job = (*_qp->ops->job.submit)(&_qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(_qp, job);
}
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ _qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -553,24 +568,24 @@ flush_jobs:
* If we haven't processed any jobs in submit loop, then flush jobs
* queue to stop the output stalling
*/
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
+ job = (*_qp->ops->job.flush_job)(&_qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ _qp->stats.enqueued_count += handle_completed_jobs(_qp, job);
return i;
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
unsigned nb_dequeued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ nb_dequeued = rte_ring_dequeue_burst(_qp->processed_pkts,
+ (void **)ops, nb_ops);
+ _qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3cd9990..d56de12 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index ab70c15..0aed177 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e7b9027..69162b1 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
@@ -72,7 +71,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +274,16 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,19 +294,17 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights;
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
-
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ while (nb_pkts_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
if (nb_pkts_sent == 0)
@@ -316,7 +314,7 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
nb_pkts_sent++;
- cur_tx_pkt++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -327,14 +325,13 @@ kick_tail:
}
uint16_t
-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -342,17 +339,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -361,9 +361,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -377,38 +376,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
-{
- struct rte_mbuf_offload *ol;
-
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
+{
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
return -EINVAL;
}
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+#endif
+ if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type
- != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
@@ -416,37 +413,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
* - always in place.
*/
qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
-
+ rte_pktmbuf_mtophys(op->sym->m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym->cipher.data.length;
+ cipher_param->cipher_offset = op->sym->cipher.data.offset;
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym->auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->auth_len = op->sym->auth.data.length;
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -457,9 +454,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
#endif
return 0;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index e9f71fe..9323383 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,12 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 85700fc..5e51aca 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 93de786..9a9dd55 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -85,6 +84,7 @@
*/
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
+
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -112,6 +117,12 @@ enum l2fwd_crypto_xform_chain {
L2FWD_CRYPTO_HASH_CIPHER
};
+struct l2fwd_key {
+ uint8_t *data;
+ uint32_t length;
+ phys_addr_t phys_addr;
+};
+
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -127,7 +138,7 @@ struct l2fwd_crypto_options {
struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_sym_key iv_key;
+ struct l2fwd_key iv_key;
uint8_t ivkey_data[16];
struct rte_crypto_sym_xform auth_xform;
@@ -142,10 +153,7 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct {
- uint8_t *data;
- uint16_t length;
- } iv;
+ struct l2fwd_key iv_key;
struct rte_cryptodev_sym_session *session;
};
@@ -157,14 +165,16 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
@@ -178,7 +188,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -203,7 +213,7 @@ struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
-#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */
/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
@@ -292,20 +302,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -313,7 +324,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -321,23 +333,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -375,43 +387,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_op_attach_sym_session(op, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym->auth.digest.length = cparams->digest_length;
+
+ op->sym->auth.data.offset = ipdata_offset;
+ op->sym->auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym->cipher.iv.data = cparams->iv_key.data;
+ op->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym->cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym->cipher.data.offset = ipdata_offset;
+ op->sym->cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym->m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -433,8 +445,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -443,7 +455,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -503,6 +515,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -563,12 +577,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -597,7 +611,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
portid = qconf->rx_port_list[i];
@@ -611,15 +625,14 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
/* Enqueue packets from Crypto device*/
for (j = 0; j < nb_rx; j++) {
m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ op = rte_crypto_op_alloc(l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
/*
- * If we can't allocate a offload, then drop
+ * If we can't allocate a crypto_op, then drop
* the rest of the burst and dequeue and
* process the packets to free offload structs
*/
- if (unlikely(ol == NULL)) {
+ if (unlikely(op == NULL)) {
for (; j < nb_rx; j++) {
rte_pktmbuf_free(pkts_burst[j]);
port_statistics[portid].dropped++;
@@ -628,24 +641,31 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
}
rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ rte_prefetch0((void *)op);
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m, op, cparams);
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym->m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ rte_prefetch0(rte_pktmbuf_mtod(m,
+ void *));
+
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
@@ -748,8 +768,8 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_sym_key *key __rte_unused,
- unsigned length __rte_unused, char *arg __rte_unused)
+parse_key(struct l2fwd_key *key __rte_unused, unsigned length __rte_unused,
+ char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
return -1;
@@ -759,26 +779,20 @@ parse_key(struct rte_crypto_sym_key *key __rte_unused,
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- if (strcmp("SHA1", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA1;
+ if (strcmp("MD5_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_MD5_HMAC;
return 0;
} else if (strcmp("SHA1_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
return 0;
- } else if (strcmp("SHA224", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA224;
- return 0;
} else if (strcmp("SHA224_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
return 0;
- } else if (strcmp("SHA256", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
- return 0;
} else if (strcmp("SHA256_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
return 0;
- } else if (strcmp("SHA512", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
+ } else if (strcmp("SHA384_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
return 0;
} else if (strcmp("SHA512_HMAC", optarg) == 0) {
-	*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+	*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
@@ -809,7 +823,7 @@ static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
struct option *lgopts, int option_index)
{
- if (strcmp(lgopts[option_index].name, "cdev_type") == 0)
+ if (strcmp(lgopts[option_index].name, "cdev") == 0)
return parse_cryptodev_type(&options->cdev_type, optarg);
else if (strcmp(lgopts[option_index].name, "chain") == 0)
@@ -824,11 +838,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_cipher_op(&options->cipher_xform.cipher.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "cipher_key") == 0)
- return parse_key(&options->cipher_xform.cipher.key,
- sizeof(options->ckey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->ckey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "iv") == 0)
+ options->cipher_xform.cipher.key.data = key.data;
+ options->cipher_xform.cipher.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "iv") == 0)
return parse_key(&options->iv_key, sizeof(options->ivkey_data),
optarg);
@@ -841,11 +861,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_auth_op(&options->auth_xform.auth.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
- return parse_key(&options->auth_xform.auth.key,
- sizeof(options->akey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->akey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
+ options->auth_xform.auth.key.data = key.data;
+ options->auth_xform.auth.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
options->sessionless = 1;
return 0;
}
@@ -905,16 +931,16 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
const char *q_arg)
{
char *end = NULL;
- long int n;
+ unsigned long n;
/* parse number string */
- n = strtol(q_arg, &end, 10);
+ n = strtoul(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
n = 0;
if (n >= MAX_TIMER_PERIOD) {
- printf("Warning refresh period specified %ld is greater than "
- "max value %d! using max value",
+ printf("Warning refresh period specified %lu is greater than "
+ "max value %lu! using max value",
n, MAX_TIMER_PERIOD);
n = MAX_TIMER_PERIOD;
}
@@ -934,13 +960,13 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->nb_ports_per_lcore = 1;
options->refresh_period = 10000;
options->single_lcore = 0;
+ options->sessionless = 0;
options->cdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
- options->sessionless = 0;
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
options->cipher_xform.next = NULL;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -949,12 +975,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->ckey_data, sizeof(options->ckey_data));
options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
options->cipher_xform.cipher.key.length = 16;
/* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
options->auth_xform.next = NULL;
options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -966,7 +991,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->akey_data, sizeof(options->akey_data));
options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
options->auth_xform.auth.key.length = 20;
}
@@ -993,39 +1017,6 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
printf("sessionless crypto: %s\n",
options->sessionless ? "enabled" : "disabled");
-#if 0
- options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
-
- /* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
- options->cipher_xform.next = NULL;
-
- options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- generate_random_key(options->ckey_data, sizeof(options->ckey_data));
-
- options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
- options->cipher_xform.cipher.key.length = 16;
-
-
- /* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
- options->auth_xform.next = NULL;
-
- options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
-
- options->auth_xform.auth.add_auth_data_length = 0;
- options->auth_xform.auth.digest_length = 20;
-
- generate_random_key(options->akey_data, sizeof(options->akey_data));
-
- options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
- options->auth_xform.auth.key.length = 20;
-#endif
}
/* Parse the argument given in the command line of the application */
@@ -1039,7 +1030,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
static struct option lgopts[] = {
{ "sessionless", no_argument, 0, 0 },
- { "cdev_type", required_argument, 0, 0 },
+ { "cdev", required_argument, 0, 0 },
{ "chain", required_argument, 0, 0 },
{ "cipher_algo", required_argument, 0, 0 },
@@ -1053,6 +1044,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "iv", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -1370,15 +1362,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
diff --git a/lib/Makefile b/lib/Makefile
index ef172ea..4c5c1b4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index df0c0b8..489314b 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -44,8 +44,353 @@
extern "C" {
#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
#include <rte_crypto_sym.h>
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ /**< Undefined operation type */
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ /**< Symmetric operation */
+};
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by a crypto device */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**<
+ * Symmetric operation failed due to invalid session arguments, or if
+ * in session-less mode, failed to allocate private operation material.
+ */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API, PMDs should check the type parameter to
+ * verify that the operation is a support function of the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() .
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+ * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+
+ struct rte_mempool *mempool;
+ /**< crypto operation mempool which operation is allocated from */
+
+ void *opaque_data;
+ /**< Opaque pointer for user data */
+
+ union {
+ struct rte_crypto_sym_op *sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+};
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ * For symmetric ops, op->sym is pointed at the rte_crypto_sym_op laid
+ * out immediately after the rte_crypto_op header in the same element.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->type = type;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ /** Symmetric operation structure starts after the end of the
+ * rte_crypto_op structure.
+ */
+ op->sym = (struct rte_crypto_sym_op *)(op + 1);
+ __rte_crypto_sym_op_reset(op->sym);
+ break;
+ default:
+ break;
+ }
+
+ op->opaque_data = NULL;
+}
+
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ /**< Crypto op pool type operation. */
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Bulk allocate raw element from mempool and return as crypto operations
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns 0
+ * - On failure returns <0
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ if (unlikely(priv->type != type &&
+ priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+ return -EINVAL;
+
+ return rte_mempool_get_bulk(mempool, (void **)ops, nb_ops);
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = NULL;
+ int retval;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+ if (unlikely(retval < 0))
+ return NULL;
+
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+
+static inline int
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int retval, i;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops);
+ if (unlikely(retval < 0))
+ return retval;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_crypto_op_reset(ops[i], type);
+
+ return 0;
+}
+
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+ /* priv data sits after the sym op that follows the op header */
+ if (likely(priv_size >= size))
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+
+ return NULL;
+}
+
+/**
+ * Free a crypto operation structure.
+ * If the operation has been allocated from an rte_mempool it is returned
+ * to that mempool; a NULL op, or an op with no mempool, is a no-op.
+ *
+ * @param op crypto operation to free (may be NULL)
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf which is associated with the crypto operation, the
+ * operation will be allocated in the private data of that
+ * mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /*
+ * check that the mbuf's private data size is sufficient to contain a
+ * crypto operation
+ */
+ if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op))))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym->m_src = m;
+
+ return op;
+}
+
+/**
+ * Allocate space for nb_xforms symmetric crypto xforms in the private data
+ * area of the crypto operation and chain them together.
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param nb_xforms number of xforms to allocate and chain
+ * @return - On success, pointer to first xform in the chain
+ *         - On failure (wrong op type or no space), NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ void *priv_data;
+ uint32_t size;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return NULL;
+
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+ nb_xforms);
+}
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param sess cryptodev session
+ * @return 0 on success, -1 if op is not a symmetric operation
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -1;
+
+ return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index cb2b8f6..88f2727 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -46,6 +46,8 @@
extern "C" {
#endif
+#include <string.h>
+
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -346,37 +348,24 @@ enum rte_crypto_sym_op_sess_type {
};
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
+struct rte_cryptodev_sym_session;
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
+ *
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
- * call for performing cipher, hash, or a combined hash and cipher operations.
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
+ * which contains the source data which the crypto operation is to be performed
+ * on.
*/
struct rte_crypto_sym_op {
- enum rte_crypto_sym_op_sess_type type;
- enum rte_crypto_op_status status;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ enum rte_crypto_sym_op_sess_type type;
union {
struct rte_cryptodev_sym_session *session;
@@ -387,7 +376,7 @@ struct rte_crypto_sym_op {
struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -395,7 +384,7 @@ struct rte_crypto_sym_op {
* this location.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -414,17 +403,68 @@ struct rte_crypto_sym_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The field @ref aad field
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -432,180 +472,169 @@ struct rte_crypto_sym_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+ * ignored. The field @ref aad should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_sym_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+ * rte_cryptodev_sym_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint16_t length; /**< Length of additional authenticated data */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
};
/**
- * Reset the fields of a crypto operation to their default values.
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
+ memset(op, 0, sizeof(*op));
+
op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
}
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+ void *priv_data, uint8_t nb_xforms)
+{
+ struct rte_crypto_sym_xform *xform;
+
+ sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param sym_op symmetric crypto operation
+ * @param sess cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
struct rte_cryptodev_sym_session *sess)
{
- op->session = sess;
- op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+ sym_op->session = sess;
+ sym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+ return 0;
}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index f32ddd4..634d248 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1098,3 +1098,77 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
return NULL;
}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ unsigned elt_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ priv_size;
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->priv_size < priv_size) {
+ mp = NULL;
+ CDEV_LOG_ERR("Mempool %s already exists but with "
+ "incompatible parameters", name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ CDEV_LOG_ERR("Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 732e2b9..5aaa00c 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -58,15 +58,18 @@ extern "C" {
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
-/**< Intel QAT SYM PMD device name */
+/**< Intel QAT Symmetric Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
+
+extern const char **rte_cyptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -411,12 +414,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -489,66 +492,65 @@ struct rte_cryptodev_data {
extern struct rte_cryptodev *rte_cryptodevs;
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operation are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
/**
- * Enqueue a burst of packets for processing on a crypto device.
+ * Enqueue a burst of operations for processing on a crypto device.
*
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
- *
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -556,25 +558,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto devices queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
@@ -620,7 +622,6 @@ extern struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(uint8_t dev_id,
struct rte_crypto_sym_xform *xform);
-
/**
* Free the memory associated with a previously allocated session.
*
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..24e00bb 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
};
\ No newline at end of file
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index c973e9b..400a849 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
diff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile
deleted file mode 100644
index acdb449..0000000
--- a/lib/librte_mbuf_offload/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c
deleted file mode 100644
index 5c0c9dd..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
deleted file mode 100644
index 926ab64..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ /dev/null
@@ -1,307 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operation's
- * parameters inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO_SYM
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_sym_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbuf offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success returns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload structure to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO_SYM:
- __rte_crypto_sym_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_sym_xform *
-rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_sym_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
deleted file mode 100644
index 3d3b06a..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
@ 2016-02-22 11:17 ` Trahe, Fiona
2016-02-22 18:23 ` Trahe, Fiona
2016-02-22 18:56 ` Trahe, Fiona
2 siblings, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-02-22 11:17 UTC (permalink / raw)
To: Doherty, Declan, dev
Hi Declan,
The build fails after a make clean.
The fix is the following change to the include path.
In addition, one unused include file can be removed.
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 489314b..5bded98 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -49,7 +49,7 @@ extern "C" {
#include <rte_memory.h>
#include <rte_mempool.h>
-#include <rte_crypto_sym.h>
+#include "rte_crypto_sym.h"
/** Crypto operation types */
enum rte_crypto_op_type {
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 5aaa00c..2e6cd9d 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -48,7 +48,6 @@
extern "C" {
#endif
-#include "stddef.h"
#include "rte_crypto.h"
#include "rte_dev.h"
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Declan Doherty
> Sent: Friday, February 19, 2016 11:01 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
2016-02-22 11:17 ` Trahe, Fiona
@ 2016-02-22 18:23 ` Trahe, Fiona
2016-02-22 18:56 ` Trahe, Fiona
2 siblings, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-02-22 18:23 UTC (permalink / raw)
To: Doherty, Declan, dev
Hi Declan,
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Declan Doherty
> Sent: Friday, February 19, 2016 11:01 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op
> oriented
>
> This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
> rte_crypto_op's rather than the current implementation which operates on
> rte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the
> use of crypto operations in general.
>
> This change set also continues the separation of the symmetric operation
> parameters
> from the more general operation parameters, this will simplify the integration of
> asymmetric crypto operations in the future.
>
> As well as the changes to the crypto APIs this patch adds functions for managing
> rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
> tests and sample application to work with the modified APIs.
>
> Finally this change set removes the now unused rte_mbuf_offload library.
>
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> ---
Delete unused fn below.
> diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
> index df0c0b8..489314b 100644
> --- a/lib/librte_cryptodev/rte_crypto.h
> +++ b/lib/librte_cryptodev/rte_crypto.h
> +
> +/**
> + * Allocate a symmetric crypto operation in the private data of an mbuf.
> + *
> + * @param m mbuf which is associated with the crypto operation, the
> + * operation will be allocated in the private data of that
> + * mbuf.
> + *
> + * @returns
> + * - On success returns a pointer to the crypto operation.
> + * - On failure returns NULL.
> + */
> +static inline struct rte_crypto_op *
> +rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
> +{
> + if (unlikely(m == NULL))
> + return NULL;
> +
> + /*
> + * check that the mbuf's private data size is sufficient to contain a
> + * crypto operation
> + */
> + if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
> + sizeof(struct rte_crypto_sym_op))))
> + return NULL;
> +
> + /* private data starts immediately after the mbuf header in the mbuf. */
> + struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
> +
> + __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
> +
> + op->mempool = NULL;
> + op->sym->m_src = m;
> +
> + return op;
> +}
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
2016-02-22 11:17 ` Trahe, Fiona
2016-02-22 18:23 ` Trahe, Fiona
@ 2016-02-22 18:56 ` Trahe, Fiona
2 siblings, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-02-22 18:56 UTC (permalink / raw)
To: Doherty, Declan, dev
Hi Declan,
Bug + fix below
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Declan Doherty
> Sent: Friday, February 19, 2016 11:01 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op
> oriented
>
> This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
> rte_crypto_op's rather than the current implementation which operates on
> rte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the
> use of crypto operations in general.
>
> This change set also continues the separation of the symmetric operation
> parameters
> from the more general operation parameters, this will simplify the integration of
> asymmetric crypto operations in the future.
>
> As well as the changes to the crypto APIs this patch adds functions for managing
> rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
> tests and sample application to work with the modified APIs.
>
> Finally this change set removes the now unused rte_mbuf_offload library.
>
>Signed-off-by: Declan Doherty <declan.doherty@intel.com>
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
//snip
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)((op + 1) +
+ sizeof(struct rte_crypto_sym_op));
The pointer arithmetic above advances in rte_crypto_op-sized increments; it should advance in byte increments, i.e.
return (void *)((uint8_t *)(op + 1) +
sizeof(struct rte_crypto_sym_op));
+ }
+
+ return NULL;
+}
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v3 0/2] cryptodev API changes
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 0/2] cryptodev API changes Declan Doherty
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
@ 2016-02-26 17:30 ` Declan Doherty
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
` (4 more replies)
2016-03-15 7:48 ` [dpdk-dev] [PATCH v2 " Cao, Min
3 siblings, 5 replies; 62+ messages in thread
From: Declan Doherty @ 2016-02-26 17:30 UTC (permalink / raw)
To: dev
This patch set separates the symmetric crypto operations from generic operations
and then modifies the cryptodev burst API to accept bursts of rte_crypto_op
rather than rte_mbufs.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 6 +-
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 8 -
config/common_linuxapp | 8 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 822 ++++++++-----------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 185 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 3 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2146 insertions(+), 2021 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v3 1/2] cryptodev: API tidy and changes to support future extensions
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
@ 2016-02-26 17:30 ` Declan Doherty
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
` (3 subsequent siblings)
4 siblings, 0 replies; 62+ messages in thread
From: Declan Doherty @ 2016-02-26 17:30 UTC (permalink / raw)
To: dev
From: Fiona Trahe <fiona.trahe@intel.com>
This patch splits symmetric specific definitions and functions away from the
common crypto APIs to facilitate the future extension and expansion of the
cryptodev framework, in order to allow asymmetric crypto operations to be
introduced at a later date, as well as to clean the logical structure of the
public includes. The patch also introduces the _sym prefix to symmetric
specific structure and functions to improve clarity in the API.
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
app/test/test_cryptodev.c | 164 +++---
app/test/test_cryptodev_perf.c | 79 +--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 44 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 6 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 51 +-
drivers/crypto/qat/qat_crypto.h | 10 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 33 +-
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 563 +------------------
lib/librte_cryptodev/rte_crypto_sym.h | 613 +++++++++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 39 +-
lib/librte_cryptodev/rte_cryptodev.h | 80 ++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_mbuf_offload/rte_mbuf_offload.h | 22 +-
16 files changed, 912 insertions(+), 837 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 62f8fb0..951b443 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,13 +57,13 @@ struct crypto_testsuite_params {
};
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
struct rte_mbuf_offload *ol;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -78,7 +78,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_param);
@@ -165,7 +165,8 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -220,7 +221,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
@@ -275,7 +276,7 @@ ut_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
ts_params->conf.session_mp.nb_objs =
- (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+ (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
DEFAULT_NUM_OPS_INFLIGHT :
DEFAULT_NUM_OPS_INFLIGHT;
@@ -319,7 +320,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
ut_params->sess);
ut_params->sess = NULL;
}
@@ -464,7 +465,7 @@ test_queue_pair_descriptor_setup(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf), "Failed to configure cryptodev %u",
@@ -766,7 +767,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -776,7 +777,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -786,20 +787,21 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -862,18 +864,18 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+ TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
ut_params->ol, 2),
"failed to allocate space for crypto transforms");
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -882,7 +884,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -958,7 +960,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA1);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -967,7 +969,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -977,13 +979,14 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -991,7 +994,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1066,7 +1069,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1075,7 +1078,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1085,13 +1088,14 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1099,7 +1103,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1168,7 +1172,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA256);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1177,7 +1181,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1187,13 +1191,14 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1201,7 +1206,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1282,7 +1287,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1291,7 +1296,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1301,15 +1306,16 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1317,7 +1323,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1371,7 +1377,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
@@ -1386,8 +1392,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
"Failed to create session params");
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
@@ -1400,7 +1407,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
{
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1409,7 +1416,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1422,7 +1429,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params)
{
@@ -1442,7 +1449,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1450,7 +1457,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1522,7 +1529,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1531,7 +1538,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1541,13 +1548,14 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1555,7 +1563,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)
rte_pktmbuf_prepend(ut_params->ibuf,
@@ -1615,7 +1623,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_AES_XCBC);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1624,7 +1632,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1634,13 +1642,14 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1648,7 +1657,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
@@ -1752,7 +1761,7 @@ test_multi_session(void)
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_cryptodev_info dev_info;
- struct rte_cryptodev_session **sessions;
+ struct rte_cryptodev_sym_session **sessions;
uint16_t i;
@@ -1761,12 +1770,13 @@ test_multi_session(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
- dev_info.max_nb_sessions) + 1, 0);
+ sessions = rte_malloc(NULL,
+ (sizeof(struct rte_cryptodev_sym_session *) *
+ dev_info.sym.max_nb_sessions) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.max_nb_sessions; i++) {
- sessions[i] = rte_cryptodev_session_create(
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(sessions[i],
@@ -1781,13 +1791,13 @@ test_multi_session(void)
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.max_nb_sessions; i++)
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
sessions[i]);
rte_free(sessions);
@@ -1806,7 +1816,7 @@ test_not_in_place_crypto(void)
/* Create multiple crypto sessions*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0], &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1828,7 +1838,7 @@ test_not_in_place_crypto(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1836,7 +1846,7 @@ test_not_in_place_crypto(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1962,7 +1972,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static struct test_command cryptodev_qat_cmd = {
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 728bcf0..42dd9bc 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -63,12 +63,12 @@ struct crypto_testsuite_params {
#define MAX_NUM_OF_OPS_PER_UT (128)
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
@@ -127,7 +127,7 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -179,7 +179,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
@@ -252,7 +252,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess)
- rte_cryptodev_session_free(ts_params->dev_id,
+ rte_cryptodev_sym_session_free(ts_params->dev_id,
ut_params->sess);
/* free crypto operation structure */
@@ -1713,7 +1713,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1723,7 +1723,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1733,7 +1733,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1741,7 +1741,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < num_to_submit ; b++) {
tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ (const char *)data_params[0].expected.ciphertext,
data_params[0].length, 0);
TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
@@ -1753,12 +1753,12 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
DIGEST_BYTE_LENGTH_SHA256);
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+ ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
@@ -1814,7 +1814,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1824,10 +1825,13 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
total_cycles += end_cycles - start_cycles;
}
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1876,7 +1880,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1885,7 +1889,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1895,7 +1899,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1928,12 +1932,12 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1957,15 +1961,19 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, tx_mbufs,
+ ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
+ < DEFAULT_BURST_SIZE) ?
+ DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
+ DEFAULT_BURST_SIZE);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1973,10 +1981,13 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num, 0,
rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1987,7 +1998,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
mmps = (double)num_received*mhz/(end_cycles - start_cycles);
throughput = mmps*data_params[index].length*8;
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
+ data_params[index].length, num_sent, num_received);
printf("\t%.2f\t%u", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
@@ -2044,7 +2055,7 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index c7d884c..61d93cd 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -109,7 +109,7 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
@@ -119,12 +119,12 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
@@ -134,11 +134,11 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
@@ -196,11 +196,11 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
@@ -259,10 +259,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,11 +296,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
@@ -313,7 +313,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
return NULL;
sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
sess, crypto_op->xform) != 0)) {
@@ -339,7 +339,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
@@ -432,14 +433,14 @@ static struct rte_mbuf *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -459,7 +460,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
@@ -511,7 +512,8 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
qp->qp_stats.enqueue_err_count++;
goto flush_jobs;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index e43b898..3cd9990 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -108,7 +108,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->dev_type = dev->dev_type;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->max_nb_sessions = internals->max_nb_sessions;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
}
}
@@ -243,7 +243,7 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
/** Configure a aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform, void *sess)
{
struct aesni_mb_private *internals = dev->data->dev_private;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..ab70c15 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -218,7 +218,7 @@ struct aesni_mb_session {
extern int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform);
+ const struct rte_crypto_sym_xform *xform);
/** device specific operations function pointer structure */
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 828756b..e7b9027 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,37 +89,37 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
}
static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
if (xform->next == NULL)
return -1;
/* Cipher Only */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
/* Authentication Only */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
/* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
/* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
return -1;
}
static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return &xform->auth;
xform = xform->next;
@@ -129,10 +129,10 @@ qat_get_auth_xform(struct rte_crypto_xform *xform)
}
static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return &xform->cipher;
xform = xform->next;
@@ -144,7 +144,7 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform)
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -261,7 +261,7 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
auth_xform->digest_length))
goto error_out;
- return (struct rte_cryptodev_session *)session;
+ return (struct rte_crypto_sym_session *)session;
error_out:
rte_mempool_put(internals->sess_mp, session);
@@ -275,7 +275,7 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
register struct qat_queue *queue;
@@ -327,7 +327,8 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct rte_mbuf_offload *ol;
struct qat_queue *queue;
@@ -343,12 +344,13 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_pkts) {
rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+ ol = rte_pktmbuf_offload_get(rx_mbuf,
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ ol->op.crypto.status =
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
@@ -384,20 +386,21 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
"to (%p) mbuf.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests mbuf (%p) is sessionless.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(ol->op.crypto.session->type
+ != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
@@ -520,8 +523,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
- info->max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..e9f71fe 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,16 +109,18 @@ qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
extern void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform, void *session_private);
extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..85700fc 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,11 +89,11 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->pci_dev->addr.devid,
cryptodev->pci_dev->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index a950b74..93de786 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,13 +124,13 @@ struct l2fwd_crypto_options {
enum l2fwd_crypto_xform_chain xform_chain;
- struct rte_crypto_xform cipher_xform;
+ struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_key iv_key;
+ struct rte_crypto_sym_key iv_key;
uint8_t ivkey_data[16];
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform auth_xform;
uint8_t akey_data[128];
};
@@ -142,8 +142,11 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct rte_crypto_key iv_key;
- struct rte_cryptodev_session *session;
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv;
+ struct rte_cryptodev_sym_session *session;
};
/** lcore configuration */
@@ -372,7 +375,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
/* Append space for digest to end of packet */
ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -474,11 +477,11 @@ generate_random_key(uint8_t *key, unsigned length)
key[i] = rand() % 0xff;
}
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
uint8_t cdev_id)
{
- struct rte_crypto_xform *first_xform;
+ struct rte_crypto_sym_xform *first_xform;
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
@@ -489,7 +492,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options,
}
/* Setup Cipher Parameters */
- return rte_cryptodev_session_create(cdev_id, first_xform);
+ return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}
static void
@@ -610,7 +613,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
m = pkts_burst[j];
ol = rte_pktmbuf_offload_alloc(
l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
/*
* If we can't allocate an offload, then drop
* the rest of the burst and dequeue and
@@ -689,7 +692,7 @@ parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg)
*type = RTE_CRYPTODEV_AESNI_MB_PMD;
return 0;
} else if (strcmp("QAT", optarg) == 0) {
- *type = RTE_CRYPTODEV_QAT_PMD;
+ *type = RTE_CRYPTODEV_QAT_SYM_PMD;
return 0;
}
@@ -745,7 +748,7 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_key *key __rte_unused,
+parse_key(struct rte_crypto_sym_key *key __rte_unused,
unsigned length __rte_unused, char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
@@ -982,7 +985,7 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
switch (options->cdev_type) {
case RTE_CRYPTODEV_AESNI_MB_PMD:
printf("cryptodev type: AES-NI MB PMD\n"); break;
- case RTE_CRYPTODEV_QAT_PMD:
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
printf("cryptodev type: QAT PMD\n"); break;
default:
break;
@@ -1179,7 +1182,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports)
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
int retval;
- if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+ if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
if (rte_cryptodev_count() < nb_ports)
return -1;
} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 81fa3fc..0d59229 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -45,6 +45,7 @@ SRCS-y += rte_cryptodev.c
# export include files
SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..df0c0b8 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,572 +36,15 @@
/**
* @file rte_crypto.h
*
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
*
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
- RTE_CRYPTO_CIPHER_NULL = 1,
- /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
- RTE_CRYPTO_CIPHER_3DES_CBC,
- /**< Triple DES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_3DES_CTR,
- /**< Triple DES algorithm in CTR mode */
- RTE_CRYPTO_CIPHER_3DES_ECB,
- /**< Triple DES algorithm in ECB mode */
-
- RTE_CRYPTO_CIPHER_AES_CBC,
- /**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
- RTE_CRYPTO_CIPHER_AES_CTR,
- /**< AES algorithm in Counter mode */
- RTE_CRYPTO_CIPHER_AES_ECB,
- /**< AES algorithm in ECB mode */
- RTE_CRYPTO_CIPHER_AES_F8,
- /**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* element of the
- * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_setup_data* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation.
- */
- RTE_CRYPTO_CIPHER_AES_XTS,
- /**< AES algorithm in XTS mode */
-
- RTE_CRYPTO_CIPHER_ARC4,
- /**< (A)RC4 cipher algorithm */
-
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- /**< Kasumi algorithm in F8 mode */
-
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- /**< SNOW3G algorithm in UEA2 mode */
-
- RTE_CRYPTO_CIPHER_ZUC_EEA3
- /**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- /**< Encrypt cipher operation */
- RTE_CRYPTO_CIPHER_OP_DECRYPT
- /**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- * use to create a session.
- */
-struct rte_crypto_cipher_xform {
- enum rte_crypto_cipher_operation op;
- /**< This parameter determines if the cipher operation is an encrypt or
- * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
- * only encrypt operations are valid.
- */
- enum rte_crypto_cipher_algorithm algo;
- /**< Cipher algorithm */
-
- struct rte_crypto_key key;
- /**< Cipher key
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
- * point to a concatenation of the AES encryption key followed by a
- * keymask. As per RFC3711, the keymask should be padded with trailing
- * bytes to match the length of the encryption key used.
- *
- * For AES-XTS mode of operation, two keys must be provided and
- * key.data must point to the two keys concatenated together (Key1 ||
- * Key2). The cipher key length will contain the total size of both
- * keys.
- *
- * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
- * 192 bits (24 bytes) or 256 bits (32 bytes).
- *
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
- * should be set to the combined length of the encryption key and the
- * keymask. Since the keymask and the encryption key are the same size,
- * key.length should be set to 2 x the AES encryption key length.
- *
- * For the AES-XTS mode of operation:
- * - Two keys must be provided and key.length refers to total length of
- * the two keys.
- * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size.
- **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
- RTE_CRYPTO_AUTH_NULL = 1,
- /**< NULL hash algorithm. */
-
- RTE_CRYPTO_AUTH_AES_CBC_MAC,
- /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_CMAC,
- /**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- /**< AES XCBC algorithm. */
-
- RTE_CRYPTO_AUTH_KASUMI_F9,
- /**< Kasumi algorithm in F9 mode. */
-
- RTE_CRYPTO_AUTH_MD5,
- /**< MD5 algorithm */
- RTE_CRYPTO_AUTH_MD5_HMAC,
- /**< HMAC using MD5 algorithm */
-
- RTE_CRYPTO_AUTH_SHA1,
- /**< 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA1_HMAC,
- /**< HMAC using 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224,
- /**< 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224_HMAC,
- /**< HMAC using 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256,
- /**< 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256_HMAC,
- /**< HMAC using 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384,
- /**< 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384_HMAC,
- /**< HMAC using 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512,
- /**< 512 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512_HMAC,
- /**< HMAC using 512 bit SHA algorithm. */
-
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- /**< SNOW3G algorithm in UIA2 mode. */
-
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- /**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
- RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
- RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
- enum rte_crypto_auth_operation op;
- /**< Authentication operation type */
- enum rte_crypto_auth_algorithm algo;
- /**< Authentication algorithm selection */
-
- struct rte_crypto_key key; /**< Authentication key data.
- * The authentication key length MUST be less than or equal to the
- * block size of the algorithm. It is the callers responsibility to
- * ensure that the key length is compliant with the standard being used
- * (for example RFC 2104, FIPS 198a).
- */
-
- uint32_t digest_length;
- /**< Length of the digest to be returned. If the verify option is set,
- * this specifies the length of the digest to be compared for the
- * session.
- *
- * If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_session_create* or by the
- * *rte_cryptodev_enqueue_burst* if using session-less APIs.
- */
-
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in the message length to hash field of
- * the rte_crypto_op_data structure.
- */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
- RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
- RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
- struct rte_crypto_xform *next; /**< next xform in chain */
-
- enum rte_crypto_xform_type type; /**< xform type */
- union {
- struct rte_crypto_auth_xform auth;
- /**< Authentication / hash xform */
- struct rte_crypto_cipher_xform cipher;
- /**< Cipher xform */
- };
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
- RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
-
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
- enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
-
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
-
- union {
- struct rte_cryptodev_session *session;
- /**< Handle for the initialised session context */
- struct rte_crypto_xform *xform;
- /**< Session-less API crypto operation parameters */
- };
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- */
- } to_cipher; /**< Data offsets and length for ciphering */
-
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field specifies the start
- * of the AAD data in the source buffer.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field specifies the length of
- * the AAD data in the source buffer.
- */
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
-
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param op The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
- op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
- struct rte_cryptodev_session *sess)
-{
- op->session = sess;
- op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 0000000..cb2b8f6
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,613 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CCM,
+ /**< AES algorithm in CCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation
+ */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_GCM,
+ /**< AES algorithm in GCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< Kasumi algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ /**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * use to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * For AES-XTS mode of operation, two keys must be provided and
+ * key.data must point to the two keys concatenated together (Key1 ||
+ * Key2). The cipher key length will contain the total size of both
+ * keys.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the CCM mode of operation, the only supported key length is 128
+ * bits (16 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ **/
+};
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CCM,
+ /**< AES algorithm in CCM mode. This is an authenticated cipher. When
+ * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+ * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+ * set up the related rte_crypto_cipher_xform structure in the
+ * session context or the corresponding parameter in the crypto
+ * operation data structures op_params parameter MUST be set for a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GCM,
+ /**< AES algorithm in GCM mode. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< Kasumi algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transforms. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the callers responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ uint32_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated. If the value is greater than the
+ * maximum length allowed by the hash then an error will be generated
+ * by *rte_cryptodev_sym_session_create* or by the
+ * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ */
+
+ uint32_t add_auth_data_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * The maximum permitted value is 240 bytes, unless otherwise specified
+ * below.
+ *
+ * This field must be specified when the hash algorithm is one of the
+ * following:
+ *
+ * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+ * length of the IV (which should be 16).
+ *
+ * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+ * the length of the Additional Authenticated Data (called A, in NIST
+ * SP800-38D).
+ *
+ * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+ * the length of the associated data (called A, in NIST SP800-38C).
+ * Note that this does NOT include the length of any padding, or the
+ * 18 bytes reserved at the start of the above field to store the
+ * block B0 and the encoded length. The maximum permitted value in
+ * this case is 222 bytes.
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+ * this field is not used and should be set to 0. Instead the length
+ * of the AAD data is specified in the message length to hash field of
+ * the rte_crypto_sym_op structure.
+ */
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required, multiple transforms
+ * can be chained together to specify a chain transforms such as authentication
+ * then cipher, or cipher then authentication. Each transform structure can
+ * hold a single transform, the type field is used to specify which transform
+ * is contained within the union
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type;
+ /**< xform type */
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ };
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+ RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
+ /**< Operation not yet submitted to a cryptodev */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
+ * call for performing cipher, hash, or a combined hash and cipher operations.
+ */
+struct rte_crypto_sym_op {
+ enum rte_crypto_sym_op_sess_type type;
+ enum rte_crypto_op_status status;
+
+ struct {
+ struct rte_mbuf *m; /**< Destination mbuf */
+ uint8_t offset; /**< Data offset */
+ } dst;
+
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing, specified
+ * as number of bytes from start of data in the source
+ * buffer. The result of the cipher operation will be
+ * written back into the output buffer starting at
+ * this location.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ * if a block cipher is being used. This is also the
+ * same as the result length.
+ *
+ * @note
+ * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+ * this value should not include the length of the
+ * padding or the length of the MAC; the driver will
+ * compute the actual number of bytes over which the
+ * encryption will occur, which will include these
+ * values.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+ * field should be set to 0.
+ */
+ } to_cipher; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+ * mode of operation, this field specifies the start
+ * of the AAD data in the source buffer.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+ * of operation, this field specifies the length of
+ * the AAD data in the source buffer.
+ */
+ } to_hash; /**< Data offsets and length for authentication */
+ } data; /**< Details of data to be operated on */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+ * Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length is 96
+ * bits) or J0 (for other sizes), where J0 is as defined by
+ * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+ * needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the nonce
+ * should be written starting at &iv[1] (to allow space for the
+ * implementation to write in the flags in the first byte).
+ * Note that a full 16 bytes should be allocated, even though
+ * the length field will have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+ * 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD be
+ * 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ size_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the length of the
+ * IV (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length of the
+ * counter (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+ * which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce, which can
+ * be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result should be
+ * inserted (in the case of digest generation) or where the
+ * purported digest exists (in the case of digest
+ * verification).
+ *
+ * At session creation time, the client specified the digest
+ * result length with the digest_length member of the @ref
+ * rte_crypto_auth_xform structure. For physical crypto
+ * devices the caller must allocate at least digest_length of
+ * physically contiguous memory at this location.
+ *
+ * For digest generation, the digest result will overwrite
+ * any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is understood
+ * to be in the destination buffer for digest generation, and
+ * in the source buffer for digest verification. The location
+ * of the digest result in this case is immediately following
+ * the region over which the digest is computed.
+ */
+ phys_addr_t phys_addr; /**< Physical address of digest */
+ uint32_t length; /**< Length of digest */
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD) needed for
+ * authenticated cipher mechanisms (CCM and GCM), and to the IV
+ * for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is set up
+ * for the session in the @ref rte_crypto_auth_xform structure
+ * as part of the @ref rte_cryptodev_sym_session_create function
+ * call. This length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+ * caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset of one
+ * byte into the array, leaving room for the implementation
+ * to write in the flags to the first byte.
+ *
+ * - the additional authentication data itself should be
+ * written starting at an offset of 18 bytes into the array,
+ * leaving room for the length encoding in the first two
+ * bytes of the second block.
+ *
+ * - the array should be big enough to hold the above fields,
+ * plus any padding to round this up to the nearest multiple
+ * of the block size (16 bytes). Padding will be added by
+ * the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set to 0.
+ * Instead the AAD data should be placed in the source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint32_t length; /**< Length of additional authenticated data (AAD) in bytes */
+ } additional_auth;
+ /**< Additional authentication parameters */
+
+ struct rte_mempool *pool;
+ /**< mempool used to allocate crypto op */
+
+ void *user_data;
+ /**< opaque pointer for user data */
+};
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+ op->dst.m = NULL;
+ op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ op->session = sess;
+ op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 2838852..c7fef6a 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -572,8 +572,8 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
@@ -604,8 +604,10 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
- config->session_mp.cache_size, config->socket_id);
+ return rte_cryptodev_sym_session_pool_create(dev,
+ config->session_mp.nb_objs,
+ config->session_mp.cache_size,
+ config->socket_id);
}
@@ -911,12 +913,12 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
void *opaque_arg,
void *_sess,
__rte_unused unsigned i)
{
- struct rte_cryptodev_session *sess = _sess;
+ struct rte_cryptodev_sym_session *sess = _sess;
struct rte_cryptodev *dev = opaque_arg;
memset(sess, 0, mp->elt_size);
@@ -930,8 +932,8 @@ rte_crypto_session_init(struct rte_mempool *mp,
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned priv_sess_size;
@@ -951,7 +953,7 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return -ENOMEM;
}
- unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+ unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
priv_sess_size;
dev->data->session_pool = rte_mempool_lookup(mp_name);
@@ -975,7 +977,8 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
0, /* private data size */
NULL, /* obj initialization constructor */
NULL, /* obj initialization constructor arg */
- rte_crypto_session_init, /* obj constructor */
+ rte_cryptodev_sym_session_init,
+ /**< obj constructor*/
dev, /* obj constructor arg */
socket_id, /* socket id */
0); /* flags */
@@ -990,11 +993,12 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return 0;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
void *_sess;
if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
@@ -1010,7 +1014,7 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return NULL;
}
- sess = (struct rte_cryptodev_session *)_sess;
+ sess = (struct rte_cryptodev_sym_session *)_sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1026,8 +1030,9 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return sess;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..732e2b9 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,14 +57,14 @@ extern "C" {
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
+/**< Intel QAT SYM PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
};
/* Logging Macros */
@@ -99,8 +99,11 @@ struct rte_cryptodev_info {
unsigned max_nb_queue_pairs;
/**< Maximum number of queues pairs supported by device. */
- unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device. */
+ } sym;
};
#define RTE_CRYPTODEV_DETACHED (0)
@@ -130,17 +133,6 @@ struct rte_cryptodev_qp_conf {
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
enum rte_cryptodev_event_type event, void *cb_arg);
-#ifdef RTE_CRYPTODEV_PERF
-/**
- * Crypto Device performance counter statistics structure. This structure is
- * used for RDTSC counters for measuring crypto operations.
- */
-struct rte_cryptodev_perf_stats {
- uint64_t t_accumlated; /**< Accumulated time processing operation */
- uint64_t t_min; /**< Max time */
- uint64_t t_max; /**< Min time */
-};
-#endif
/** Crypto Device statistics */
struct rte_cryptodev_stats {
@@ -153,29 +145,8 @@ struct rte_cryptodev_stats {
/**< Total error count on operations enqueued */
uint64_t dequeue_err_count;
/**< Total error count on operations dequeued */
+};
-#ifdef RTE_CRYPTODEV_DETAILED_STATS
- struct {
- uint64_t encrypt_ops; /**< Count of encrypt operations */
- uint64_t encrypt_bytes; /**< Number of bytes encrypted */
-
- uint64_t decrypt_ops; /**< Count of decrypt operations */
- uint64_t decrypt_bytes; /**< Number of bytes decrypted */
- } cipher; /**< Cipher operations stats */
-
- struct {
- uint64_t generate_ops; /**< Count of generate operations */
- uint64_t bytes_hashed; /**< Number of bytes hashed */
-
- uint64_t verify_ops; /**< Count of verify operations */
- uint64_t bytes_verified;/**< Number of bytes verified */
- } hash; /**< Hash operations stats */
-#endif
-
-#ifdef RTE_CRYPTODEV_PERF
- struct rte_cryptodev_perf_stats op_perf; /**< Operations stats */
-#endif
-} __rte_cache_aligned;
/**
* Create a virtual crypto device
@@ -607,6 +578,23 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
}
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+ struct {
+ uint8_t dev_id;
+ /**< Device Id */
+ enum rte_cryptodev_type type;
+ /**< Crypto device type the session was created on */
+ struct rte_mempool *mp;
+ /**< Mempool session allocated from */
+ } __rte_aligned(8);
+ /**< Public symmetric session details */
+
+ char _private[0];
+ /**< Private session material */
+};
+
+
/**
* Initialise a session for symmetric cryptographic operations.
*
@@ -628,9 +616,9 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
* @return
* Pointer to the created session or NULL
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
- struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform);
/**
@@ -638,15 +626,15 @@ rte_cryptodev_session_create(uint8_t dev_id,
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
- * *rte_cryptodev_session_create*.
+ * *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
- struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *session);
#ifdef __cplusplus
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 8270afa..7d049ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,11 +56,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-struct rte_cryptodev_stats;
-struct rte_cryptodev_info;
-struct rte_cryptodev_qp_conf;
-
-enum rte_cryptodev_event_type;
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define RTE_PMD_DEBUG_TRACE(...) \
@@ -80,7 +75,6 @@ struct rte_cryptodev_session {
};
struct rte_cryptodev_driver;
-struct rte_cryptodev;
/**
* Initialisation function of a crypto driver invoked for each matching
@@ -364,7 +358,7 @@ typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
* - On success returns a pointer to a rte_mempool
* - On failure returns a NULL pointer
*/
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
struct rte_cryptodev *dev, unsigned nb_objs,
unsigned obj_cache_size, int socket_id);
@@ -378,7 +372,7 @@ typedef int (*cryptodev_create_session_pool_t)(
* - On success returns the size of the session structure for device
* - On failure returns 0
*/
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
@@ -392,7 +386,7 @@ typedef unsigned (*cryptodev_get_session_private_size_t)(
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
void *session_private);
/**
@@ -406,14 +400,14 @@ typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
/**
* Free Crypto session.
* @param session Cryptodev session structure to free
*/
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
@@ -427,9 +421,9 @@ struct rte_cryptodev_ops {
cryptodev_info_get_t dev_infos_get; /**< Get device info. */
cryptodev_stats_get_t stats_get;
- /**< Get generic device statistics. */
+ /**< Get device statistics. */
cryptodev_stats_reset_t stats_reset;
- /**< Reset generic device statistics. */
+ /**< Reset device statistics. */
cryptodev_queue_pair_setup_t queue_pair_setup;
/**< Set up a device queue pair. */
@@ -442,13 +436,13 @@ struct rte_cryptodev_ops {
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return private session. */
- cryptodev_initialize_session_t session_initialize;
+ cryptodev_sym_initialize_session_t session_initialize;
/**< Initialization function for private session data */
- cryptodev_configure_session_t session_configure;
+ cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
- cryptodev_free_session_t session_clear;
+ cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
};
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
index 77993b6..5ce6058 100644
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,7 +67,7 @@ extern "C" {
enum rte_mbuf_ol_op_type {
RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
/**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
+ RTE_PKTMBUF_OL_CRYPTO_SYM
/**< Crypto offload operation */
};
@@ -84,7 +84,7 @@ struct rte_mbuf_offload {
enum rte_mbuf_ol_op_type type; /**< offload type */
union {
- struct rte_crypto_op crypto; /**< Crypto operation */
+ struct rte_crypto_sym_op crypto; /**< Crypto operation */
} op;
};
@@ -194,8 +194,8 @@ __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
ol->type = type;
switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
+ case RTE_PKTMBUF_OL_CRYPTO_SYM:
+ __rte_crypto_sym_op_reset(&ol->op.crypto); break;
default:
break;
}
@@ -278,24 +278,24 @@ __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
* - On success returns pointer to first crypto xform in crypto operations chain
* - On failure returns NULL
*/
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
unsigned nb_xforms)
{
- struct rte_crypto_xform *xform;
+ struct rte_crypto_sym_xform *xform;
void *priv_data;
uint16_t size;
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
if (priv_data == NULL)
return NULL;
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+ ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
} while (xform);
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v3 2/2] cryptodev: change burst API to be crypto op oriented
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
@ 2016-02-26 17:30 ` Declan Doherty
2016-02-29 16:00 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
` (2 subsequent siblings)
4 siblings, 0 replies; 62+ messages in thread
From: Declan Doherty @ 2016-02-26 17:30 UTC (permalink / raw)
To: dev
This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
of rte_crypto_op's rather than the current implementation which operates on
rte_mbuf bursts; this simplifies the burst processing in the crypto PMDs and the
use of crypto operations in general.
The changes also continue the separation of the symmetric operation parameters
from the more general operation parameters; this will simplify the integration of
asymmetric crypto operations in the future.
As well as the changes to the crypto APIs this patch adds functions for managing
rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
tests and sample application to work with the modified APIs and finally
removes the now unused rte_mbuf_offload library.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
MAINTAINERS | 6 +-
app/test/test_cryptodev.c | 804 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 253 +++----
config/common_bsdapp | 8 -
config/common_linuxapp | 8 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 171 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 2 +-
drivers/crypto/qat/qat_crypto.c | 123 ++--
drivers/crypto/qat/qat_crypto.h | 12 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 4 +-
examples/l2fwd-crypto/main.c | 283 ++++----
lib/Makefile | 1 -
lib/librte_cryptodev/rte_crypto.h | 367 +++++++++-
lib/librte_cryptodev/rte_crypto_sym.h | 377 +++++-----
lib/librte_cryptodev/rte_cryptodev.c | 76 ++
lib/librte_cryptodev/rte_cryptodev.h | 109 ++-
lib/librte_cryptodev/rte_cryptodev_version.map | 3 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 --------
.../rte_mbuf_offload_version.map | 7 -
25 files changed, 1577 insertions(+), 1527 deletions(-)
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index 628bc05..ad6b45e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -222,16 +222,12 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
F: scripts/test-null.sh
-Crypto API - EXPERIMENTAL
+Crypto API
M: Declan Doherty <declan.doherty@intel.com>
F: lib/librte_cryptodev/
F: app/test/test_cryptodev*
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 951b443..e3d40d5 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -48,7 +47,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,8 +61,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_mbuf_offload *ol;
- struct rte_crypto_sym_op *op;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -104,7 +102,7 @@ setup_test_string(struct rte_mempool *mpool,
return m;
}
-#if HEX_DUMP
+#ifdef HEX_DUMP
static void
hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
{
@@ -112,27 +110,29 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
-#if HEX_DUMP
+#ifdef HEX_DUMP
hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
-#if HEX_DUMP
+#ifdef HEX_DUMP
if (obuf)
hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -162,13 +162,14 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -253,10 +254,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -326,8 +326,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -680,76 +680,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -776,7 +777,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -786,59 +786,65 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ /* Create crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ /* Set crypto operation authentication parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ /* Set crypto operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -863,60 +869,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -985,42 +997,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* attach symmetric crypto session to crypto operations */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1088,47 +1106,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1197,42 +1219,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1244,7 +1271,6 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
return TEST_SUCCESS;
}
@@ -1312,43 +1338,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1367,7 +1396,6 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
DIGEST_BYTE_LENGTH_SHA512,
"Generated digest data not as expected");
-
return TEST_SUCCESS;
}
@@ -1424,6 +1452,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->auth_xform.auth.key.data = hmac_sha512_key;
ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
+
return TEST_SUCCESS;
}
@@ -1448,43 +1477,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1497,8 +1529,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
-
return TEST_SUCCESS;
}
@@ -1524,10 +1554,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
catch_22_quote, QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
@@ -1548,56 +1574,75 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Set operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ /* Set operation authentication parameters */
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->m_src,
+ CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ rte_pktmbuf_mtod_offset(
+ ut_params->op->sym->m_src, uint8_t *,
+ CIPHER_IV_LENGTH_AES_CBC +
+ QUOTE_512_BYTES),
catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC,
+ gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
+ TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC :
+ DIGEST_BYTE_LENGTH_AES_XCBC,
"Generated digest data not as expected");
return TEST_SUCCESS;
@@ -1614,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC);
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
@@ -1648,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf, QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ rte_memcpy(sym_op->auth.digest.data,
+ catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
+ DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1837,50 +1894,53 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+ sym_op->m_dst = dst_m;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
-
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 034393e..c84ba42 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -32,8 +32,6 @@
#ifndef TEST_CRYPTODEV_H_
#define TEST_CRYPTODEV_H_
-#define HEX_DUMP 0
-
#define FALSE 0
#define TRUE 1
@@ -47,8 +45,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
@@ -62,7 +61,9 @@
#define AES_XCBC_MAC_KEY_SZ (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 (12)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224 (14)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC (12)
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 42dd9bc..b3f4fd9 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -50,7 +49,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -68,8 +67,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_op *op;
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +77,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +90,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
@@ -113,23 +111,24 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -256,8 +255,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1698,11 +1697,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1739,46 +1739,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[0].length;
+
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +1793,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1814,9 +1818,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1824,15 +1827,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1846,16 +1849,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1865,11 +1861,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+ double throughput, mmps;
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1908,7 +1907,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+ "Mrps\tThroughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1918,63 +1917,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+ struct rte_crypto_op *op = rte_crypto_op_alloc(
+ ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[index].length;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)
+ rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num,
- 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
- < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
- DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -1982,37 +1988,34 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym->m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
diff --git a/config/common_bsdapp b/config/common_bsdapp
index 696382c..69a1016 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -306,7 +306,6 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
#
# Compile generic crypto device library
-# EXPERIMENTAL: API may change without prior notice
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
@@ -357,13 +356,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index f1638db..6da8f54 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -320,7 +320,6 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
#
# Compile generic crypto device library
-# EXPERIMENTAL: API may change without prior notice
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
@@ -373,13 +372,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 7a91001..f626386 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 61d93cd..5b26444 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -37,7 +37,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
@@ -296,16 +295,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
void *_sess = NULL;
@@ -316,7 +315,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0)) {
+ sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -338,12 +337,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_sym_op *c_op,
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -372,49 +373,65 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL)
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ get_digest_byte_length(job->hash_alg));
+
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library currently only supports returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
@@ -429,43 +446,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_sym_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
}
- return m;
+ return op;
}
/**
@@ -481,16 +496,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_pkts, (void *)op);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -499,53 +514,53 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
JOB_AES_HMAC *job = NULL;
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i],
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+ if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ MB_LOG_ERR("PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", ops[i]);
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
-
- sess = get_session(qp, &ol->op.crypto);
+#endif
+ sess = get_session(_qp, ops[i]);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(_qp, ops[i], sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
/* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ job = (*_qp->ops->job.submit)(&_qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(_qp, job);
}
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ _qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -553,24 +568,24 @@ flush_jobs:
* If we haven't processed any jobs in submit loop, then flush jobs
* queue to stop the output stalling
*/
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
+ job = (*_qp->ops->job.flush_job)(&_qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ _qp->stats.enqueued_count += handle_completed_jobs(_qp, job);
return i;
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
unsigned nb_dequeued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ nb_dequeued = rte_ring_dequeue_burst(_qp->processed_pkts,
+ (void **)ops, nb_ops);
+ _qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3cd9990..d56de12 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index ab70c15..0aed177 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e7b9027..38dc956 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
@@ -72,7 +71,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +274,16 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,19 +294,17 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights;
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
-
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ while (nb_pkts_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
if (nb_pkts_sent == 0)
@@ -316,7 +314,7 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
nb_pkts_sent++;
- cur_tx_pkt++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -327,14 +325,13 @@ kick_tail:
}
uint16_t
-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -342,17 +339,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -361,9 +361,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -377,38 +376,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
- struct rte_mbuf_offload *ol;
-
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
return -EINVAL;
}
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+#endif
+ if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type
- != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
@@ -416,37 +413,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
* - always in place.
*/
qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
-
+ rte_pktmbuf_mtophys(op->sym->m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym->cipher.data.length;
+ cipher_param->cipher_offset = op->sym->cipher.data.offset;
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym->auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->auth_len = op->sym->auth.data.length;
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -457,9 +454,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
#endif
return 0;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index e9f71fe..9323383 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,12 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 85700fc..5e51aca 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 93de786..6d5aeeb 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -85,6 +84,7 @@
*/
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
+
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -112,6 +117,12 @@ enum l2fwd_crypto_xform_chain {
L2FWD_CRYPTO_HASH_CIPHER
};
+struct l2fwd_key {
+ uint8_t *data;
+ uint32_t length;
+ phys_addr_t phys_addr;
+};
+
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -127,7 +138,7 @@ struct l2fwd_crypto_options {
struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_sym_key iv_key;
+ struct l2fwd_key iv_key;
uint8_t ivkey_data[16];
struct rte_crypto_sym_xform auth_xform;
@@ -142,10 +153,7 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct {
- uint8_t *data;
- uint16_t length;
- } iv;
+ struct l2fwd_key iv_key;
struct rte_cryptodev_sym_session *session;
};
@@ -157,14 +165,16 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
@@ -178,7 +188,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -203,7 +213,7 @@ struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
-#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */
/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
@@ -292,20 +302,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -313,7 +324,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -321,23 +333,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -375,43 +387,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_op_attach_sym_session(op, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym->auth.digest.length = cparams->digest_length;
+
+ op->sym->auth.data.offset = ipdata_offset;
+ op->sym->auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym->cipher.iv.data = cparams->iv_key.data;
+ op->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym->cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym->cipher.data.offset = ipdata_offset;
+ op->sym->cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym->m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -433,8 +445,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -443,7 +455,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -503,6 +515,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -563,12 +577,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -597,8 +611,6 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
-
portid = qconf->rx_port_list[i];
cparams = &port_cparams[i];
@@ -608,44 +620,49 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
port_statistics[portid].rx += nb_rx;
- /* Enqueue packets from Crypto device*/
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ if (nb_rx) {
/*
- * If we can't allocate a offload, then drop
+ * If we can't allocate a crypto_ops, then drop
* the rest of the burst and dequeue and
* process the packets to free offload structs
*/
- if (unlikely(ol == NULL)) {
- for (; j < nb_rx; j++) {
- rte_pktmbuf_free(pkts_burst[j]);
- port_statistics[portid].dropped++;
- }
- break;
+ if (rte_crypto_op_bulk_alloc(
+ l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, nb_rx) !=
+ nb_rx) {
+ for (j = 0; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+
+ nb_rx = 0;
}
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ /* Enqueue packets from Crypto device*/
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m,
+ ops_burst[j], cparams);
+ }
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
-
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
+
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym->m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
@@ -748,8 +765,8 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_sym_key *key __rte_unused,
- unsigned length __rte_unused, char *arg __rte_unused)
+parse_key(struct l2fwd_key *key __rte_unused, unsigned length __rte_unused,
+ char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
return -1;
@@ -759,26 +776,20 @@ parse_key(struct rte_crypto_sym_key *key __rte_unused,
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- if (strcmp("SHA1", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA1;
+ if (strcmp("MD5_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_MD5_HMAC;
return 0;
} else if (strcmp("SHA1_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
return 0;
- } else if (strcmp("SHA224", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA224;
- return 0;
} else if (strcmp("SHA224_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
return 0;
- } else if (strcmp("SHA256", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
- return 0;
} else if (strcmp("SHA256_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
return 0;
- } else if (strcmp("SHA512", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
+ } else if (strcmp("SHA384_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
return 0;
} else if (strcmp("SHA512_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
@@ -809,7 +820,7 @@ static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
struct option *lgopts, int option_index)
{
- if (strcmp(lgopts[option_index].name, "cdev_type") == 0)
+ if (strcmp(lgopts[option_index].name, "cdev") == 0)
return parse_cryptodev_type(&options->cdev_type, optarg);
else if (strcmp(lgopts[option_index].name, "chain") == 0)
@@ -824,11 +835,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_cipher_op(&options->cipher_xform.cipher.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "cipher_key") == 0)
- return parse_key(&options->cipher_xform.cipher.key,
- sizeof(options->ckey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->ckey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "iv") == 0)
+ options->cipher_xform.cipher.key.data = key.data;
+ options->cipher_xform.cipher.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "iv") == 0)
return parse_key(&options->iv_key, sizeof(options->ivkey_data),
optarg);
@@ -841,11 +858,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_auth_op(&options->auth_xform.auth.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
- return parse_key(&options->auth_xform.auth.key,
- sizeof(options->akey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->akey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
+ options->auth_xform.auth.key.data = key.data;
+ options->auth_xform.auth.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
options->sessionless = 1;
return 0;
}
@@ -905,16 +928,16 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
const char *q_arg)
{
char *end = NULL;
- long int n;
+ unsigned long n;
/* parse number string */
- n = strtol(q_arg, &end, 10);
+ n = strtoul(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
n = 0;
if (n >= MAX_TIMER_PERIOD) {
- printf("Warning refresh period specified %ld is greater than "
- "max value %d! using max value",
+ printf("Warning refresh period specified %lu is greater than "
+ "max value %lu! using max value",
n, MAX_TIMER_PERIOD);
n = MAX_TIMER_PERIOD;
}
@@ -934,13 +957,13 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->nb_ports_per_lcore = 1;
options->refresh_period = 10000;
options->single_lcore = 0;
+ options->sessionless = 0;
options->cdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
- options->sessionless = 0;
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
options->cipher_xform.next = NULL;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -949,12 +972,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->ckey_data, sizeof(options->ckey_data));
options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
options->cipher_xform.cipher.key.length = 16;
/* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
options->auth_xform.next = NULL;
options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -966,7 +988,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->akey_data, sizeof(options->akey_data));
options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
options->auth_xform.auth.key.length = 20;
}
@@ -993,39 +1014,6 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
printf("sessionless crypto: %s\n",
options->sessionless ? "enabled" : "disabled");
-#if 0
- options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
-
- /* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
- options->cipher_xform.next = NULL;
-
- options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- generate_random_key(options->ckey_data, sizeof(options->ckey_data));
-
- options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
- options->cipher_xform.cipher.key.length = 16;
-
-
- /* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
- options->auth_xform.next = NULL;
-
- options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
-
- options->auth_xform.auth.add_auth_data_length = 0;
- options->auth_xform.auth.digest_length = 20;
-
- generate_random_key(options->akey_data, sizeof(options->akey_data));
-
- options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
- options->auth_xform.auth.key.length = 20;
-#endif
}
/* Parse the argument given in the command line of the application */
@@ -1039,7 +1027,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
static struct option lgopts[] = {
{ "sessionless", no_argument, 0, 0 },
- { "cdev_type", required_argument, 0, 0 },
+ { "cdev", required_argument, 0, 0 },
{ "chain", required_argument, 0, 0 },
{ "cipher_algo", required_argument, 0, 0 },
@@ -1053,6 +1041,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "iv", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -1370,15 +1359,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
diff --git a/lib/Makefile b/lib/Makefile
index ef172ea..4c5c1b4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index df0c0b8..89f4d38 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -44,7 +44,372 @@
extern "C" {
#endif
-#include <rte_crypto_sym.h>
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_crypto_sym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ /**< Undefined operation type */
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ /**< Symmetric operation */
+};
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by a crypto device */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**<
+ * Symmetric operation failed due to invalid session arguments, or if
+ * in session-less mode, failed to allocate private operation material.
+ */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API, PMDs should check the type parameter to
+ * verify that the operation is a support function of the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() .
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+ * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+
+ struct rte_mempool *mempool;
+ /**< crypto operation mempool which operation is allocated from */
+
+ phys_addr_t phys_addr;
+ /**< physical address of crypto operation */
+
+ void *opaque_data;
+ /**< Opaque pointer for user data */
+
+ union {
+ struct rte_crypto_sym_op *sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+} __rte_cache_aligned;
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->type = type;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ /** Symmetric operation structure starts after the end of the
+ * rte_crypto_op structure.
+ */
+ op->sym = (struct rte_crypto_sym_op *)(op + 1);
+ op->type = type;
+
+ __rte_crypto_sym_op_reset(op->sym);
+ break;
+ default:
+ break;
+ }
+
+ op->opaque_data = NULL;
+}
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ /**< Crypto op pool type operation. */
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Bulk allocate raw element from mempool and return as crypto operations
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns number of ops allocated
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ priv = rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != type &&
+ priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+ return -EINVAL;
+
+ /* rte_mempool_get_bulk() returns 0 on success; fill the caller's ops
+ * array directly rather than a stack-local array that would dangle. */
+ if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+ return nb_ops;
+
+ return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = NULL;
+ int retval;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+ if (unlikely(retval < 1))
+ return NULL;
+
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+ < 1))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_crypto_op_reset(ops[i], type);
+
+ return nb_ops;
+}
+
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+
+ return NULL;
+}
+
+/**
+ * free crypto operation structure
+ * If operation has been allocate from a rte_mempool, then the operation will
+ * be returned to the mempool.
+ *
+ * @param op symmetric crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf which is associated with the crypto operation, the
+ * operation will be allocated in the private data of that
+ * mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /*
+ * check that the mbuf's private data size is sufficient to contain a
+ * crypto operation
+ */
+ if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op))))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym->m_src = m;
+
+ return op;
+}
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ void *priv_data;
+ uint32_t size;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return NULL;
+
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+ nb_xforms);
+}
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param sess cryptodev session
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -1;
+
+ return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index cb2b8f6..88f2727 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -46,6 +46,8 @@
extern "C" {
#endif
+#include <string.h>
+
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -346,37 +348,24 @@ enum rte_crypto_sym_op_sess_type {
};
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
+struct rte_cryptodev_sym_session;
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
+ *
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
- * call for performing cipher, hash, or a combined hash and cipher operations.
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
+ * which contains the source data which the crypto operation is to be performed
+ * on.
*/
struct rte_crypto_sym_op {
- enum rte_crypto_sym_op_sess_type type;
- enum rte_crypto_op_status status;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ enum rte_crypto_sym_op_sess_type type;
union {
struct rte_cryptodev_sym_session *session;
@@ -387,7 +376,7 @@ struct rte_crypto_sym_op {
struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -395,7 +384,7 @@ struct rte_crypto_sym_op {
* this location.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -414,17 +403,68 @@ struct rte_crypto_sym_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The field @ref aad field
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -432,180 +472,169 @@ struct rte_crypto_sym_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+					 * ignored. The @ref aad field should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_sym_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+			 * rte_cryptodev_sym_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+			uint16_t length;	/**< Length of additional authenticated data */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
};
/**
- * Reset the fields of a crypto operation to their default values.
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
+ memset(op, 0, sizeof(*op));
+
op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
}
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+ void *priv_data, uint8_t nb_xforms)
+{
+ struct rte_crypto_sym_xform *xform;
+
+ sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param op crypto operation
+ * @param sess cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
struct rte_cryptodev_sym_session *sess)
{
- op->session = sess;
- op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+ sym_op->session = sess;
+ sym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+ return 0;
}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index c7fef6a..4632ca3 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1056,3 +1056,79 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
return NULL;
}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ unsigned elt_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ priv_size;
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->priv_size < priv_size) {
+ mp = NULL;
+ CDEV_LOG_ERR("Mempool %s already exists but with "
+ "incompatible parameters", name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ CDEV_LOG_ERR("Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 732e2b9..b63cb57 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -48,8 +48,6 @@
extern "C" {
#endif
-#include "stddef.h"
-
#include "rte_crypto.h"
#include "rte_dev.h"
@@ -58,15 +56,18 @@ extern "C" {
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
-/**< Intel QAT SYM PMD device name */
+/**< Intel QAT Symmetric Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
+
+extern const char **rte_cyptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -411,12 +412,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -489,66 +490,65 @@ struct rte_cryptodev_data {
extern struct rte_cryptodev *rte_cryptodevs;
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operation are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
/**
- * Enqueue a burst of packets for processing on a crypto device.
- *
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * Enqueue a burst of operations for processing on a crypto device.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -556,25 +556,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto devices queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
@@ -620,7 +620,6 @@ extern struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(uint8_t dev_id,
struct rte_crypto_sym_xform *xform);
-
/**
* Free the memory associated with a previously allocated session.
*
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..39cd9ec 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -1,4 +1,4 @@
-DPDK_2.2 {
+DPDK_16.04 {
global:
rte_cryptodevs;
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
};
\ No newline at end of file
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index c973e9b..400a849 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
diff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile
deleted file mode 100644
index acdb449..0000000
--- a/lib/librte_mbuf_offload/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c
deleted file mode 100644
index 5c0c9dd..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
deleted file mode 100644
index 5ce6058..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operations
- * parameter's inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO_SYM
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_sym_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbug offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- (struct rte_pktmbuf_offload_pool_private *)rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success retruns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload strucutre to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO_SYM:
- __rte_crypto_sym_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_sym_xform *
-rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_sym_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
deleted file mode 100644
index 3d3b06a..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v3 0/2] cryptodev API changes
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
@ 2016-02-29 16:00 ` Declan Doherty
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
2016-03-15 7:07 ` [dpdk-dev] [PATCH v3 " Cao, Min
4 siblings, 0 replies; 62+ messages in thread
From: Declan Doherty @ 2016-02-29 16:00 UTC (permalink / raw)
To: dev
On 26/02/16 17:30, Declan Doherty wrote:
> This patch set separates the symmetric crypto operations from generic operations
> and then modifies the cryptodev burst API to accept bursts of rte_crypto_op
> rather than rte_mbufs.
>
> V3:
> - Addresses V2 comments
> - Rebased for head
>
> Declan Doherty (1):
> cryptodev: change burst API to be crypto op oriented
>
> Fiona Trahe (1):
> cryptodev: API tidy and changes to support future extensions
>
> MAINTAINERS | 6 +-
> app/test/test_cryptodev.c | 894 +++++++++++----------
> app/test/test_cryptodev.h | 9 +-
> app/test/test_cryptodev_perf.c | 270 ++++---
> config/common_bsdapp | 8 -
> config/common_linuxapp | 8 -
> doc/api/doxy-api-index.md | 1 -
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
> drivers/crypto/qat/qat_crypto.c | 150 ++--
> drivers/crypto/qat/qat_crypto.h | 14 +-
> drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
> examples/l2fwd-crypto/main.c | 300 ++++---
> lib/Makefile | 1 -
> lib/librte_cryptodev/Makefile | 1 +
> lib/librte_cryptodev/rte_crypto.h | 822 ++++++++-----------
> lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
> lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
> lib/librte_cryptodev/rte_cryptodev.h | 185 ++---
> lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
> lib/librte_cryptodev/rte_cryptodev_version.map | 3 +-
> lib/librte_mbuf/rte_mbuf.h | 6 -
> lib/librte_mbuf_offload/Makefile | 52 --
> lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
> lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
> .../rte_mbuf_offload_version.map | 7 -
> 27 files changed, 2146 insertions(+), 2021 deletions(-)
> create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
> delete mode 100644 lib/librte_mbuf_offload/Makefile
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
>
self NAK.
There is an issue with mis-merged code in the __rte_crypto_op_raw_bulk_alloc
function in rte_crypto.h
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v4 0/2] cryptodev API changes
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
` (2 preceding siblings ...)
2016-02-29 16:00 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
@ 2016-02-29 16:52 ` Declan Doherty
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
` (9 more replies)
2016-03-15 7:07 ` [dpdk-dev] [PATCH v3 " Cao, Min
4 siblings, 10 replies; 62+ messages in thread
From: Declan Doherty @ 2016-02-29 16:52 UTC (permalink / raw)
To: dev
This patch set separates the symmetric crypto operations from generic operations
and then modifies the cryptodev burst API to accept bursts of rte_crypto_op
rather than rte_mbufs.
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 6 +-
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 8 -
config/common_linuxapp | 8 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 185 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 3 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2143 insertions(+), 2021 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v4 1/2] cryptodev: API tidy and changes to support future extensions
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
@ 2016-02-29 16:52 ` Declan Doherty
2016-03-04 14:43 ` Thomas Monjalon
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
` (8 subsequent siblings)
9 siblings, 1 reply; 62+ messages in thread
From: Declan Doherty @ 2016-02-29 16:52 UTC (permalink / raw)
To: dev
From: Fiona Trahe <fiona.trahe@intel.com>
This patch splits symmetric specific definitions and functions away from the
common crypto APIs to facilitate the future extension and expansion of the
cryptodev framework, in order to allow asymmetric crypto operations to be
introduced at a later date, as well as to clean the logical structure of the
public includes. The patch also introduces the _sym prefix to symmetric
specific structure and functions to improve clarity in the API.
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
app/test/test_cryptodev.c | 164 +++---
app/test/test_cryptodev_perf.c | 79 +--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 44 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 6 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 51 +-
drivers/crypto/qat/qat_crypto.h | 10 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 33 +-
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 563 +------------------
lib/librte_cryptodev/rte_crypto_sym.h | 613 +++++++++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 39 +-
lib/librte_cryptodev/rte_cryptodev.h | 80 ++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_mbuf_offload/rte_mbuf_offload.h | 22 +-
16 files changed, 912 insertions(+), 837 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 62f8fb0..951b443 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,13 +57,13 @@ struct crypto_testsuite_params {
};
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
struct rte_mbuf_offload *ol;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -78,7 +78,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_param);
@@ -165,7 +165,8 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -220,7 +221,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
@@ -275,7 +276,7 @@ ut_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
ts_params->conf.session_mp.nb_objs =
- (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+ (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
DEFAULT_NUM_OPS_INFLIGHT :
DEFAULT_NUM_OPS_INFLIGHT;
@@ -319,7 +320,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
ut_params->sess);
ut_params->sess = NULL;
}
@@ -464,7 +465,7 @@ test_queue_pair_descriptor_setup(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf), "Failed to configure cryptodev %u",
@@ -766,7 +767,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -776,7 +777,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -786,20 +787,21 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -862,18 +864,18 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+ TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
ut_params->ol, 2),
"failed to allocate space for crypto transforms");
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -882,7 +884,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -958,7 +960,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA1);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -967,7 +969,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -977,13 +979,14 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -991,7 +994,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1066,7 +1069,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1075,7 +1078,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1085,13 +1088,14 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1099,7 +1103,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1168,7 +1172,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA256);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1177,7 +1181,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1187,13 +1191,14 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1201,7 +1206,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1282,7 +1287,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1291,7 +1296,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1301,15 +1306,16 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1317,7 +1323,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1371,7 +1377,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
@@ -1386,8 +1392,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
"Failed to create session params");
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
@@ -1400,7 +1407,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
{
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1409,7 +1416,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1422,7 +1429,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params)
{
@@ -1442,7 +1449,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1450,7 +1457,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1522,7 +1529,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1531,7 +1538,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1541,13 +1548,14 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1555,7 +1563,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)
rte_pktmbuf_prepend(ut_params->ibuf,
@@ -1615,7 +1623,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_AES_XCBC);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1624,7 +1632,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1634,13 +1642,14 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1648,7 +1657,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
@@ -1752,7 +1761,7 @@ test_multi_session(void)
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_cryptodev_info dev_info;
- struct rte_cryptodev_session **sessions;
+ struct rte_cryptodev_sym_session **sessions;
uint16_t i;
@@ -1761,12 +1770,13 @@ test_multi_session(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
- dev_info.max_nb_sessions) + 1, 0);
+ sessions = rte_malloc(NULL,
+ (sizeof(struct rte_cryptodev_sym_session *) *
+ dev_info.sym.max_nb_sessions) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.max_nb_sessions; i++) {
- sessions[i] = rte_cryptodev_session_create(
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(sessions[i],
@@ -1781,13 +1791,13 @@ test_multi_session(void)
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.max_nb_sessions; i++)
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
sessions[i]);
rte_free(sessions);
@@ -1806,7 +1816,7 @@ test_not_in_place_crypto(void)
/* Create multiple crypto sessions*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0], &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1828,7 +1838,7 @@ test_not_in_place_crypto(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1836,7 +1846,7 @@ test_not_in_place_crypto(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1962,7 +1972,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static struct test_command cryptodev_qat_cmd = {
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 728bcf0..42dd9bc 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -63,12 +63,12 @@ struct crypto_testsuite_params {
#define MAX_NUM_OF_OPS_PER_UT (128)
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
@@ -127,7 +127,7 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -179,7 +179,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
@@ -252,7 +252,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess)
- rte_cryptodev_session_free(ts_params->dev_id,
+ rte_cryptodev_sym_session_free(ts_params->dev_id,
ut_params->sess);
/* free crypto operation structure */
@@ -1713,7 +1713,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1723,7 +1723,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1733,7 +1733,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1741,7 +1741,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < num_to_submit ; b++) {
tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ (const char *)data_params[0].expected.ciphertext,
data_params[0].length, 0);
TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
@@ -1753,12 +1753,12 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
DIGEST_BYTE_LENGTH_SHA256);
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+ ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
@@ -1814,7 +1814,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1824,10 +1825,13 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
total_cycles += end_cycles - start_cycles;
}
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1876,7 +1880,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1885,7 +1889,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1895,7 +1899,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1928,12 +1932,12 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1957,15 +1961,19 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, tx_mbufs,
+ ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
+ < DEFAULT_BURST_SIZE) ?
+ DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
+ DEFAULT_BURST_SIZE);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1973,10 +1981,13 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num, 0,
rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1987,7 +1998,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
mmps = (double)num_received*mhz/(end_cycles - start_cycles);
throughput = mmps*data_params[index].length*8;
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
+ data_params[index].length, num_sent, num_received);
printf("\t%.2f\t%u", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
@@ -2044,7 +2055,7 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index c7d884c..61d93cd 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -109,7 +109,7 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
@@ -119,12 +119,12 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
@@ -134,11 +134,11 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
@@ -196,11 +196,11 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
@@ -259,10 +259,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,11 +296,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
@@ -313,7 +313,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
return NULL;
sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
sess, crypto_op->xform) != 0)) {
@@ -339,7 +339,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
@@ -432,14 +433,14 @@ static struct rte_mbuf *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -459,7 +460,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
@@ -511,7 +512,8 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
qp->qp_stats.enqueue_err_count++;
goto flush_jobs;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index e43b898..3cd9990 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -108,7 +108,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->dev_type = dev->dev_type;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->max_nb_sessions = internals->max_nb_sessions;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
}
}
@@ -243,7 +243,7 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
/** Configure a aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform, void *sess)
{
struct aesni_mb_private *internals = dev->data->dev_private;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..ab70c15 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -218,7 +218,7 @@ struct aesni_mb_session {
extern int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform);
+ const struct rte_crypto_sym_xform *xform);
/** device specific operations function pointer structure */
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 828756b..e7b9027 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,37 +89,37 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
}
static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
if (xform->next == NULL)
return -1;
/* Cipher Only */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
/* Authentication Only */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
/* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
/* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
return -1;
}
static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return &xform->auth;
xform = xform->next;
@@ -129,10 +129,10 @@ qat_get_auth_xform(struct rte_crypto_xform *xform)
}
static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return &xform->cipher;
xform = xform->next;
@@ -144,7 +144,7 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform)
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -261,7 +261,7 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
auth_xform->digest_length))
goto error_out;
- return (struct rte_cryptodev_session *)session;
+ return (struct rte_crypto_sym_session *)session;
error_out:
rte_mempool_put(internals->sess_mp, session);
@@ -275,7 +275,7 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
register struct qat_queue *queue;
@@ -327,7 +327,8 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct rte_mbuf_offload *ol;
struct qat_queue *queue;
@@ -343,12 +344,13 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_pkts) {
rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+ ol = rte_pktmbuf_offload_get(rx_mbuf,
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ ol->op.crypto.status =
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
@@ -384,20 +386,21 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
"to (%p) mbuf.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests mbuf (%p) is sessionless.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(ol->op.crypto.session->type
+ != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
@@ -520,8 +523,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
- info->max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..e9f71fe 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,16 +109,18 @@ qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
extern void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform, void *session_private);
extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..85700fc 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,11 +89,11 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->pci_dev->addr.devid,
cryptodev->pci_dev->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index a950b74..93de786 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,13 +124,13 @@ struct l2fwd_crypto_options {
enum l2fwd_crypto_xform_chain xform_chain;
- struct rte_crypto_xform cipher_xform;
+ struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_key iv_key;
+ struct rte_crypto_sym_key iv_key;
uint8_t ivkey_data[16];
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform auth_xform;
uint8_t akey_data[128];
};
@@ -142,8 +142,11 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct rte_crypto_key iv_key;
- struct rte_cryptodev_session *session;
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv;
+ struct rte_cryptodev_sym_session *session;
};
/** lcore configuration */
@@ -372,7 +375,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
/* Append space for digest to end of packet */
ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -474,11 +477,11 @@ generate_random_key(uint8_t *key, unsigned length)
key[i] = rand() % 0xff;
}
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
uint8_t cdev_id)
{
- struct rte_crypto_xform *first_xform;
+ struct rte_crypto_sym_xform *first_xform;
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
@@ -489,7 +492,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options,
}
/* Setup Cipher Parameters */
- return rte_cryptodev_session_create(cdev_id, first_xform);
+ return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}
static void
@@ -610,7 +613,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
m = pkts_burst[j];
ol = rte_pktmbuf_offload_alloc(
l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
/*
* If we can't allocate a offload, then drop
* the rest of the burst and dequeue and
@@ -689,7 +692,7 @@ parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg)
*type = RTE_CRYPTODEV_AESNI_MB_PMD;
return 0;
} else if (strcmp("QAT", optarg) == 0) {
- *type = RTE_CRYPTODEV_QAT_PMD;
+ *type = RTE_CRYPTODEV_QAT_SYM_PMD;
return 0;
}
@@ -745,7 +748,7 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_key *key __rte_unused,
+parse_key(struct rte_crypto_sym_key *key __rte_unused,
unsigned length __rte_unused, char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
@@ -982,7 +985,7 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
switch (options->cdev_type) {
case RTE_CRYPTODEV_AESNI_MB_PMD:
printf("cryptodev type: AES-NI MB PMD\n"); break;
- case RTE_CRYPTODEV_QAT_PMD:
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
printf("cryptodev type: QAT PMD\n"); break;
default:
break;
@@ -1179,7 +1182,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports)
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
int retval;
- if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+ if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
if (rte_cryptodev_count() < nb_ports)
return -1;
} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 81fa3fc..0d59229 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -45,6 +45,7 @@ SRCS-y += rte_cryptodev.c
# export include files
SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..df0c0b8 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,572 +36,15 @@
/**
* @file rte_crypto.h
*
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
*
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
- RTE_CRYPTO_CIPHER_NULL = 1,
- /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
- RTE_CRYPTO_CIPHER_3DES_CBC,
- /**< Triple DES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_3DES_CTR,
- /**< Triple DES algorithm in CTR mode */
- RTE_CRYPTO_CIPHER_3DES_ECB,
- /**< Triple DES algorithm in ECB mode */
-
- RTE_CRYPTO_CIPHER_AES_CBC,
- /**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
- RTE_CRYPTO_CIPHER_AES_CTR,
- /**< AES algorithm in Counter mode */
- RTE_CRYPTO_CIPHER_AES_ECB,
- /**< AES algorithm in ECB mode */
- RTE_CRYPTO_CIPHER_AES_F8,
- /**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* element of the
- * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_setup_data* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation.
- */
- RTE_CRYPTO_CIPHER_AES_XTS,
- /**< AES algorithm in XTS mode */
-
- RTE_CRYPTO_CIPHER_ARC4,
- /**< (A)RC4 cipher algorithm */
-
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- /**< Kasumi algorithm in F8 mode */
-
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- /**< SNOW3G algorithm in UEA2 mode */
-
- RTE_CRYPTO_CIPHER_ZUC_EEA3
- /**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- /**< Encrypt cipher operation */
- RTE_CRYPTO_CIPHER_OP_DECRYPT
- /**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- * use to create a session.
- */
-struct rte_crypto_cipher_xform {
- enum rte_crypto_cipher_operation op;
- /**< This parameter determines if the cipher operation is an encrypt or
- * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
- * only encrypt operations are valid.
- */
- enum rte_crypto_cipher_algorithm algo;
- /**< Cipher algorithm */
-
- struct rte_crypto_key key;
- /**< Cipher key
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
- * point to a concatenation of the AES encryption key followed by a
- * keymask. As per RFC3711, the keymask should be padded with trailing
- * bytes to match the length of the encryption key used.
- *
- * For AES-XTS mode of operation, two keys must be provided and
- * key.data must point to the two keys concatenated together (Key1 ||
- * Key2). The cipher key length will contain the total size of both
- * keys.
- *
- * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
- * 192 bits (24 bytes) or 256 bits (32 bytes).
- *
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
- * should be set to the combined length of the encryption key and the
- * keymask. Since the keymask and the encryption key are the same size,
- * key.length should be set to 2 x the AES encryption key length.
- *
- * For the AES-XTS mode of operation:
- * - Two keys must be provided and key.length refers to total length of
- * the two keys.
- * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size.
- **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
- RTE_CRYPTO_AUTH_NULL = 1,
- /**< NULL hash algorithm. */
-
- RTE_CRYPTO_AUTH_AES_CBC_MAC,
- /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_CMAC,
- /**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- /**< AES XCBC algorithm. */
-
- RTE_CRYPTO_AUTH_KASUMI_F9,
- /**< Kasumi algorithm in F9 mode. */
-
- RTE_CRYPTO_AUTH_MD5,
- /**< MD5 algorithm */
- RTE_CRYPTO_AUTH_MD5_HMAC,
- /**< HMAC using MD5 algorithm */
-
- RTE_CRYPTO_AUTH_SHA1,
- /**< 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA1_HMAC,
- /**< HMAC using 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224,
- /**< 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224_HMAC,
- /**< HMAC using 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256,
- /**< 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256_HMAC,
- /**< HMAC using 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384,
- /**< 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384_HMAC,
- /**< HMAC using 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512,
- /**< 512 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512_HMAC,
- /**< HMAC using 512 bit SHA algorithm. */
-
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- /**< SNOW3G algorithm in UIA2 mode. */
-
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- /**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
- RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
- RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
- enum rte_crypto_auth_operation op;
- /**< Authentication operation type */
- enum rte_crypto_auth_algorithm algo;
- /**< Authentication algorithm selection */
-
- struct rte_crypto_key key; /**< Authentication key data.
- * The authentication key length MUST be less than or equal to the
- * block size of the algorithm. It is the callers responsibility to
- * ensure that the key length is compliant with the standard being used
- * (for example RFC 2104, FIPS 198a).
- */
-
- uint32_t digest_length;
- /**< Length of the digest to be returned. If the verify option is set,
- * this specifies the length of the digest to be compared for the
- * session.
- *
- * If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_session_create* or by the
- * *rte_cryptodev_enqueue_burst* if using session-less APIs.
- */
-
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in the message length to hash field of
- * the rte_crypto_op_data structure.
- */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
- RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
- RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
- struct rte_crypto_xform *next; /**< next xform in chain */
-
- enum rte_crypto_xform_type type; /**< xform type */
- union {
- struct rte_crypto_auth_xform auth;
- /**< Authentication / hash xform */
- struct rte_crypto_cipher_xform cipher;
- /**< Cipher xform */
- };
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
- RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
-
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
- enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
-
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
-
- union {
- struct rte_cryptodev_session *session;
- /**< Handle for the initialised session context */
- struct rte_crypto_xform *xform;
- /**< Session-less API crypto operation parameters */
- };
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- */
- } to_cipher; /**< Data offsets and length for ciphering */
-
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field specifies the start
- * of the AAD data in the source buffer.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field specifies the length of
- * the AAD data in the source buffer.
- */
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
-
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param op The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
- op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
- struct rte_cryptodev_session *sess)
-{
- op->session = sess;
- op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 0000000..cb2b8f6
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,613 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CCM,
+ /**< AES algorithm in CCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation
+ */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_GCM,
+ /**< AES algorithm in GCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< Kasumi algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ /**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * used to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * For AES-XTS mode of operation, two keys must be provided and
+ * key.data must point to the two keys concatenated together (Key1 ||
+ * Key2). The cipher key length will contain the total size of both
+ * keys.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the CCM mode of operation, the only supported key length is 128
+ * bits (16 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ **/
+};
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CCM,
+ /**< AES algorithm in CCM mode. This is an authenticated cipher. When
+ * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+ * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+ * set up the related rte_crypto_cipher_xform structure in the
+ * session context or the corresponding parameter in the crypto
+ * operation data structures op_params parameter MUST be set for a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GCM,
+ /**< AES algorithm in GCM mode. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< Kasumi algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 160 bit SHA-1 algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 160 bit SHA-1 algorithm. */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transform. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the caller's responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ uint32_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated. If the value is greater than the
+ * maximum length allowed by the hash then an error will be generated
+ * by *rte_cryptodev_sym_session_create* or by the
+ * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ */
+
+ uint32_t add_auth_data_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * The maximum permitted value is 240 bytes, unless otherwise specified
+ * below.
+ *
+ * This field must be specified when the hash algorithm is one of the
+ * following:
+ *
+ * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+ * length of the IV (which should be 16).
+ *
+ * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+ * the length of the Additional Authenticated Data (called A, in NIST
+ * SP800-38D).
+ *
+ * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+ * the length of the associated data (called A, in NIST SP800-38C).
+ * Note that this does NOT include the length of any padding, or the
+ * 18 bytes reserved at the start of the above field to store the
+ * block B0 and the encoded length. The maximum permitted value in
+ * this case is 222 bytes.
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+ * this field is not used and should be set to 0. Instead the length
+ * of the AAD data is specified in the message length to hash field of
+ * the rte_crypto_sym_op_data structure.
+ */
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required, multiple transforms
+ * can be chained together to specify a chain of transforms such as authentication
+ * then cipher, or cipher then authentication. Each transform structure can
+ * hold a single transform, the type field is used to specify which transform
+ * is contained within the union
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type; /**< xform type */
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ };
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+ RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
+ /**< Operation not yet submitted to a cryptodev */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
+ * call for performing cipher, hash, or a combined hash and cipher operations.
+ */
+struct rte_crypto_sym_op {
+ enum rte_crypto_sym_op_sess_type type;
+ enum rte_crypto_op_status status;
+
+ struct {
+ struct rte_mbuf *m; /**< Destination mbuf */
+ uint8_t offset; /**< Data offset */
+ } dst;
+
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing, specified
+ * as number of bytes from start of data in the source
+ * buffer. The result of the cipher operation will be
+ * written back into the output buffer starting at
+ * this location.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ * if a block cipher is being used. This is also the
+ * same as the result length.
+ *
+ * @note
+ * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+ * this value should not include the length of the
+ * padding or the length of the MAC; the driver will
+ * compute the actual number of bytes over which the
+ * encryption will occur, which will include these
+ * values.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+ * field should be set to 0.
+ */
+ } to_cipher; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+ * mode of operation, this field specifies the start
+ * of the AAD data in the source buffer.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+ * of operation, this field specifies the length of
+ * the AAD data in the source buffer.
+ */
+ } to_hash; /**< Data offsets and length for authentication */
+ } data; /**< Details of data to be operated on */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+ * Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length is 96
+ * bits) or J0 (for other sizes), where J0 is as defined by
+ * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+ * needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the nonce
+ * should be written starting at &iv[1] (to allow space for the
+ * implementation to write in the flags in the first byte).
+ * Note that a full 16 bytes should be allocated, even though
+ * the length field will have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+ * 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD be
+ * 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ size_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the length of the
+ * IV (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length of the
+ * counter (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+ * which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce, which can
+ * be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result should be
+ * inserted (in the case of digest generation) or where the
+ * purported digest exists (in the case of digest
+ * verification).
+ *
+ * At session creation time, the client specified the digest
+ * result length with the digest_length member of the @ref
+ * rte_crypto_auth_xform structure. For physical crypto
+ * devices the caller must allocate at least digest_length of
+ * physically contiguous memory at this location.
+ *
+ * For digest generation, the digest result will overwrite
+ * any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is understood
+ * to be in the destination buffer for digest generation, and
+ * in the source buffer for digest verification. The location
+ * of the digest result in this case is immediately following
+ * the region over which the digest is computed.
+ */
+ phys_addr_t phys_addr; /**< Physical address of digest */
+ uint32_t length; /**< Length of digest */
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD) needed for
+ * authenticated cipher mechanisms (CCM and GCM), and to the IV
+ * for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is set up
+ * for the session in the @ref rte_crypto_auth_xform structure
+ * as part of the @ref rte_cryptodev_sym_session_create function
+ * call. This length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+ * caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset of one
+ * byte into the array, leaving room for the implementation
+ * to write in the flags to the first byte.
+ *
+ * - the additional authentication data itself should be
+ * written starting at an offset of 18 bytes into the array,
+ * leaving room for the length encoding in the first two
+ * bytes of the second block.
+ *
+ * - the array should be big enough to hold the above fields,
+ * plus any padding to round this up to the nearest multiple
+ * of the block size (16 bytes). Padding will be added by
+ * the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set to 0.
+ * Instead the AAD data should be placed in the source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint32_t length; /**< Length of additional authenticated data */
+ } additional_auth;
+ /**< Additional authentication parameters */
+
+ struct rte_mempool *pool;
+ /**< mempool used to allocate crypto op */
+
+ void *user_data;
+ /**< opaque pointer for user data */
+};
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+ op->dst.m = NULL;
+ op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ op->session = sess;
+ op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 2838852..c7fef6a 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -572,8 +572,8 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
@@ -604,8 +604,10 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
- config->session_mp.cache_size, config->socket_id);
+ return rte_cryptodev_sym_session_pool_create(dev,
+ config->session_mp.nb_objs,
+ config->session_mp.cache_size,
+ config->socket_id);
}
@@ -911,12 +913,12 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
void *opaque_arg,
void *_sess,
__rte_unused unsigned i)
{
- struct rte_cryptodev_session *sess = _sess;
+ struct rte_cryptodev_sym_session *sess = _sess;
struct rte_cryptodev *dev = opaque_arg;
memset(sess, 0, mp->elt_size);
@@ -930,8 +932,8 @@ rte_crypto_session_init(struct rte_mempool *mp,
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned priv_sess_size;
@@ -951,7 +953,7 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return -ENOMEM;
}
- unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+ unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
priv_sess_size;
dev->data->session_pool = rte_mempool_lookup(mp_name);
@@ -975,7 +977,8 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
0, /* private data size */
NULL, /* obj initialization constructor */
NULL, /* obj initialization constructor arg */
- rte_crypto_session_init, /* obj constructor */
+ rte_cryptodev_sym_session_init,
+ /**< obj constructor*/
dev, /* obj constructor arg */
socket_id, /* socket id */
0); /* flags */
@@ -990,11 +993,12 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return 0;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
void *_sess;
if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
@@ -1010,7 +1014,7 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return NULL;
}
- sess = (struct rte_cryptodev_session *)_sess;
+ sess = (struct rte_cryptodev_sym_session *)_sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1026,8 +1030,9 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return sess;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..732e2b9 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,14 +57,14 @@ extern "C" {
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
+/**< Intel QAT SYM PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
};
/* Logging Macros */
@@ -99,8 +99,11 @@ struct rte_cryptodev_info {
unsigned max_nb_queue_pairs;
/**< Maximum number of queues pairs supported by device. */
- unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device. */
+ } sym;
};
#define RTE_CRYPTODEV_DETACHED (0)
@@ -130,17 +133,6 @@ struct rte_cryptodev_qp_conf {
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
enum rte_cryptodev_event_type event, void *cb_arg);
-#ifdef RTE_CRYPTODEV_PERF
-/**
- * Crypto Device performance counter statistics structure. This structure is
- * used for RDTSC counters for measuring crypto operations.
- */
-struct rte_cryptodev_perf_stats {
- uint64_t t_accumlated; /**< Accumulated time processing operation */
- uint64_t t_min; /**< Max time */
- uint64_t t_max; /**< Min time */
-};
-#endif
/** Crypto Device statistics */
struct rte_cryptodev_stats {
@@ -153,29 +145,8 @@ struct rte_cryptodev_stats {
/**< Total error count on operations enqueued */
uint64_t dequeue_err_count;
/**< Total error count on operations dequeued */
+};
-#ifdef RTE_CRYPTODEV_DETAILED_STATS
- struct {
- uint64_t encrypt_ops; /**< Count of encrypt operations */
- uint64_t encrypt_bytes; /**< Number of bytes encrypted */
-
- uint64_t decrypt_ops; /**< Count of decrypt operations */
- uint64_t decrypt_bytes; /**< Number of bytes decrypted */
- } cipher; /**< Cipher operations stats */
-
- struct {
- uint64_t generate_ops; /**< Count of generate operations */
- uint64_t bytes_hashed; /**< Number of bytes hashed */
-
- uint64_t verify_ops; /**< Count of verify operations */
- uint64_t bytes_verified;/**< Number of bytes verified */
- } hash; /**< Hash operations stats */
-#endif
-
-#ifdef RTE_CRYPTODEV_PERF
- struct rte_cryptodev_perf_stats op_perf; /**< Operations stats */
-#endif
-} __rte_cache_aligned;
/**
* Create a virtual crypto device
@@ -607,6 +578,23 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
}
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+ struct {
+ uint8_t dev_id;
+ /**< Device Id */
+ enum rte_cryptodev_type type;
+ /**< Crypto Device type session created on */
+ struct rte_mempool *mp;
+ /**< Mempool session allocated from */
+ } __rte_aligned(8);
+ /**< Public symmetric session details */
+
+ char _private[0];
+ /**< Private session material */
+};
+
+
/**
* Initialise a session for symmetric cryptographic operations.
*
@@ -628,9 +616,9 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
* @return
* Pointer to the created session or NULL
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
- struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform);
/**
@@ -638,15 +626,15 @@ rte_cryptodev_session_create(uint8_t dev_id,
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
- * *rte_cryptodev_session_create*.
+ * *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
- struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *session);
#ifdef __cplusplus
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 8270afa..7d049ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,11 +56,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-struct rte_cryptodev_stats;
-struct rte_cryptodev_info;
-struct rte_cryptodev_qp_conf;
-
-enum rte_cryptodev_event_type;
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define RTE_PMD_DEBUG_TRACE(...) \
@@ -80,7 +75,6 @@ struct rte_cryptodev_session {
};
struct rte_cryptodev_driver;
-struct rte_cryptodev;
/**
* Initialisation function of a crypto driver invoked for each matching
@@ -364,7 +358,7 @@ typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
* - On success returns a pointer to a rte_mempool
* - On failure returns a NULL pointer
*/
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
struct rte_cryptodev *dev, unsigned nb_objs,
unsigned obj_cache_size, int socket_id);
@@ -378,7 +372,7 @@ typedef int (*cryptodev_create_session_pool_t)(
* - On success returns the size of the session structure for device
* - On failure returns 0
*/
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
@@ -392,7 +386,7 @@ typedef unsigned (*cryptodev_get_session_private_size_t)(
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
void *session_private);
/**
@@ -406,14 +400,14 @@ typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
/**
* Free Crypto session.
* @param session Cryptodev session structure to free
*/
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
@@ -427,9 +421,9 @@ struct rte_cryptodev_ops {
cryptodev_info_get_t dev_infos_get; /**< Get device info. */
cryptodev_stats_get_t stats_get;
- /**< Get generic device statistics. */
+ /**< Get device statistics. */
cryptodev_stats_reset_t stats_reset;
- /**< Reset generic device statistics. */
+ /**< Reset device statistics. */
cryptodev_queue_pair_setup_t queue_pair_setup;
/**< Set up a device queue pair. */
@@ -442,13 +436,13 @@ struct rte_cryptodev_ops {
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return private session. */
- cryptodev_initialize_session_t session_initialize;
+ cryptodev_sym_initialize_session_t session_initialize;
/**< Initialization function for private session data */
- cryptodev_configure_session_t session_configure;
+ cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
- cryptodev_free_session_t session_clear;
+ cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
};
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
index 77993b6..5ce6058 100644
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,7 +67,7 @@ extern "C" {
enum rte_mbuf_ol_op_type {
RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
/**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
+ RTE_PKTMBUF_OL_CRYPTO_SYM
/**< Crypto offload operation */
};
@@ -84,7 +84,7 @@ struct rte_mbuf_offload {
enum rte_mbuf_ol_op_type type; /**< offload type */
union {
- struct rte_crypto_op crypto; /**< Crypto operation */
+ struct rte_crypto_sym_op crypto; /**< Crypto operation */
} op;
};
@@ -194,8 +194,8 @@ __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
ol->type = type;
switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
+ case RTE_PKTMBUF_OL_CRYPTO_SYM:
+ __rte_crypto_sym_op_reset(&ol->op.crypto); break;
default:
break;
}
@@ -278,24 +278,24 @@ __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
* - On success returns pointer to first crypto xform in crypto operations chain
* - On failure returns NULL
*/
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
unsigned nb_xforms)
{
- struct rte_crypto_xform *xform;
+ struct rte_crypto_sym_xform *xform;
void *priv_data;
uint16_t size;
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
if (priv_data == NULL)
return NULL;
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+ ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
} while (xform);
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v4 2/2] cryptodev: change burst API to be crypto op oriented
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
@ 2016-02-29 16:52 ` Declan Doherty
2016-02-29 17:47 ` [dpdk-dev] [PATCH v4 0/2] cryptodev API changes Trahe, Fiona
` (7 subsequent siblings)
9 siblings, 0 replies; 62+ messages in thread
From: Declan Doherty @ 2016-02-29 16:52 UTC (permalink / raw)
To: dev
This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
rte_crypto_op's rather than the current implementation which operates on
rte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the
use of crypto operations in general.
The changes also continue the separation of the symmetric operation parameters
from the more general operation parameters, this will simplify the integration of
asymmetric crypto operations in the future.
As well as the changes to the crypto APIs this patch adds functions for managing
rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
tests and sample application to work with the modified APIs and finally
removes the now unused rte_mbuf_offload library.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
MAINTAINERS | 6 +-
app/test/test_cryptodev.c | 804 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 253 +++----
config/common_bsdapp | 8 -
config/common_linuxapp | 8 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 171 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 2 +-
drivers/crypto/qat/qat_crypto.c | 123 ++--
drivers/crypto/qat/qat_crypto.h | 12 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 4 +-
examples/l2fwd-crypto/main.c | 283 ++++----
lib/Makefile | 1 -
lib/librte_cryptodev/rte_crypto.h | 364 +++++++++-
lib/librte_cryptodev/rte_crypto_sym.h | 379 +++++-----
lib/librte_cryptodev/rte_cryptodev.c | 76 ++
lib/librte_cryptodev/rte_cryptodev.h | 109 ++-
lib/librte_cryptodev/rte_cryptodev_version.map | 3 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 --------
.../rte_mbuf_offload_version.map | 7 -
25 files changed, 1575 insertions(+), 1528 deletions(-)
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index 628bc05..ad6b45e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -222,16 +222,12 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
F: scripts/test-null.sh
-Crypto API - EXPERIMENTAL
+Crypto API
M: Declan Doherty <declan.doherty@intel.com>
F: lib/librte_cryptodev/
F: app/test/test_cryptodev*
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 951b443..208fc14 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -48,7 +47,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,8 +61,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_mbuf_offload *ol;
- struct rte_crypto_sym_op *op;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -104,7 +102,7 @@ setup_test_string(struct rte_mempool *mpool,
return m;
}
-#if HEX_DUMP
+#ifdef HEX_DUMP
static void
hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
{
@@ -112,27 +110,29 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
-#if HEX_DUMP
+#ifdef HEX_DUMP
hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
-#if HEX_DUMP
+#ifdef HEX_DUMP
if (obuf)
hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -162,13 +162,14 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -253,10 +254,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -326,8 +326,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -680,76 +680,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -776,7 +777,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -786,59 +786,65 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ /* Create crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ /* Set crypto operation authentication parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ /* Set crypto operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -863,60 +869,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -985,42 +997,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* attach symmetric crypto session to crypto operations */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1088,47 +1106,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1197,42 +1219,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1244,7 +1271,6 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
return TEST_SUCCESS;
}
@@ -1312,43 +1338,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1367,7 +1396,6 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
DIGEST_BYTE_LENGTH_SHA512,
"Generated digest data not as expected");
-
return TEST_SUCCESS;
}
@@ -1424,6 +1452,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->auth_xform.auth.key.data = hmac_sha512_key;
ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
+
return TEST_SUCCESS;
}
@@ -1448,43 +1477,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1497,8 +1529,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
-
return TEST_SUCCESS;
}
@@ -1524,10 +1554,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
catch_22_quote, QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
@@ -1548,56 +1574,75 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Set operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ /* Set operation authentication parameters */
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->m_src,
+ CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ rte_pktmbuf_mtod_offset(
+ ut_params->op->sym->m_src, uint8_t *,
+ CIPHER_IV_LENGTH_AES_CBC +
+ QUOTE_512_BYTES),
catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC,
+ gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
+ TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC :
+ DIGEST_BYTE_LENGTH_AES_XCBC,
"Generated digest data not as expected");
return TEST_SUCCESS;
@@ -1614,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC);
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
@@ -1648,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf, QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ rte_memcpy(sym_op->auth.digest.data,
+ catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
+ DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1837,50 +1894,53 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+ sym_op->m_dst = dst_m;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
-
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 034393e..c84ba42 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -32,8 +32,6 @@
#ifndef TEST_CRYPTODEV_H_
#define TEST_CRYPTODEV_H_
-#define HEX_DUMP 0
-
#define FALSE 0
#define TRUE 1
@@ -47,8 +45,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
@@ -62,7 +61,9 @@
#define AES_XCBC_MAC_KEY_SZ (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 (12)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224 (14)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC (12)
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 42dd9bc..b3f4fd9 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -50,7 +49,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -68,8 +67,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_op *op;
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +77,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +90,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
@@ -113,23 +111,24 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -256,8 +255,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1698,11 +1697,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1739,46 +1739,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[0].length;
+
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +1793,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1814,9 +1818,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1824,15 +1827,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1846,16 +1849,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1865,11 +1861,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+ double throughput, mmps;
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1908,7 +1907,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+ "Mrps\tThroughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1918,63 +1917,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+ TEST_ASSERT_NOT_NULL(ut_params->digest
+ , "no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+ struct rte_crypto_op *op = rte_crypto_op_alloc(
+ ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[index].length;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)
+ rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num,
- 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
- < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
- DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -1982,37 +1988,34 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym->m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
diff --git a/config/common_bsdapp b/config/common_bsdapp
index 696382c..69a1016 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -306,7 +306,6 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
#
# Compile generic crypto device library
-# EXPERIMENTAL: API may change without prior notice
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
@@ -357,13 +356,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index f1638db..6da8f54 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -320,7 +320,6 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
#
# Compile generic crypto device library
-# EXPERIMENTAL: API may change without prior notice
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
@@ -373,13 +372,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 7a91001..f626386 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 61d93cd..5b26444 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -37,7 +37,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
@@ -296,16 +295,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
void *_sess = NULL;
@@ -316,7 +315,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0)) {
+ sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -338,12 +337,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_sym_op *c_op,
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -372,49 +373,65 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL)
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ sizeof(get_digest_byte_length(job->hash_alg)));
+
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library currently only supports returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
@@ -429,43 +446,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_sym_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
}
- return m;
+ return op;
}
/**
@@ -481,16 +496,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_pkts, (void *)op);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -499,53 +514,53 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
JOB_AES_HMAC *job = NULL;
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i],
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+ if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ MB_LOG_ERR("PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", ops[i]);
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
-
- sess = get_session(qp, &ol->op.crypto);
+#endif
+ sess = get_session(_qp, ops[i]);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(_qp, ops[i], sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
/* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ job = (*_qp->ops->job.submit)(&_qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(_qp, job);
}
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ _qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -553,24 +568,24 @@ flush_jobs:
* If we haven't processed any jobs in submit loop, then flush jobs
* queue to stop the output stalling
*/
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
+ job = (*_qp->ops->job.flush_job)(&_qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ _qp->stats.enqueued_count += handle_completed_jobs(_qp, job);
return i;
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
unsigned nb_dequeued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ nb_dequeued = rte_ring_dequeue_burst(_qp->processed_pkts,
+ (void **)ops, nb_ops);
+ _qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3cd9990..d56de12 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index ab70c15..0aed177 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e7b9027..38dc956 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
@@ -72,7 +71,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +274,16 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,19 +294,17 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights;
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
-
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ while (nb_pkts_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
if (nb_pkts_sent == 0)
@@ -316,7 +314,7 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
nb_pkts_sent++;
- cur_tx_pkt++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -327,14 +325,13 @@ kick_tail:
}
uint16_t
-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -342,17 +339,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -361,9 +361,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -377,38 +376,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
- struct rte_mbuf_offload *ol;
-
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
return -EINVAL;
}
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+#endif
+ if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type
- != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
@@ -416,37 +413,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
* - always in place.
*/
qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
-
+ rte_pktmbuf_mtophys(op->sym->m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym->cipher.data.length;
+ cipher_param->cipher_offset = op->sym->cipher.data.offset;
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym->auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->auth_len = op->sym->auth.data.length;
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -457,9 +454,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
#endif
return 0;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index e9f71fe..9323383 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,12 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 85700fc..5e51aca 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 93de786..6d5aeeb 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -85,6 +84,7 @@
*/
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
+
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -112,6 +117,12 @@ enum l2fwd_crypto_xform_chain {
L2FWD_CRYPTO_HASH_CIPHER
};
+struct l2fwd_key {
+ uint8_t *data;
+ uint32_t length;
+ phys_addr_t phys_addr;
+};
+
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -127,7 +138,7 @@ struct l2fwd_crypto_options {
struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_sym_key iv_key;
+ struct l2fwd_key iv_key;
uint8_t ivkey_data[16];
struct rte_crypto_sym_xform auth_xform;
@@ -142,10 +153,7 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct {
- uint8_t *data;
- uint16_t length;
- } iv;
+ struct l2fwd_key iv_key;
struct rte_cryptodev_sym_session *session;
};
@@ -157,14 +165,16 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
@@ -178,7 +188,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -203,7 +213,7 @@ struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
-#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */
/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
@@ -292,20 +302,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -313,7 +324,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -321,23 +333,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -375,43 +387,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_op_attach_sym_session(op, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym->auth.digest.length = cparams->digest_length;
+
+ op->sym->auth.data.offset = ipdata_offset;
+ op->sym->auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym->cipher.iv.data = cparams->iv_key.data;
+ op->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym->cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym->cipher.data.offset = ipdata_offset;
+ op->sym->cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym->m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -433,8 +445,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -443,7 +455,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -503,6 +515,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -563,12 +577,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -597,8 +611,6 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
-
portid = qconf->rx_port_list[i];
cparams = &port_cparams[i];
@@ -608,44 +620,49 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
port_statistics[portid].rx += nb_rx;
- /* Enqueue packets from Crypto device*/
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ if (nb_rx) {
/*
- * If we can't allocate a offload, then drop
+ * If we can't allocate a crypto_ops, then drop
* the rest of the burst and dequeue and
* process the packets to free offload structs
*/
- if (unlikely(ol == NULL)) {
- for (; j < nb_rx; j++) {
- rte_pktmbuf_free(pkts_burst[j]);
- port_statistics[portid].dropped++;
- }
- break;
+ if (rte_crypto_op_bulk_alloc(
+ l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, nb_rx) !=
+ nb_rx) {
+ for (j = 0; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+
+ nb_rx = 0;
}
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ /* Enqueue packets from Crypto device*/
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m,
+ ops_burst[j], cparams);
+ }
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
-
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
+
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym->m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
@@ -748,8 +765,8 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_sym_key *key __rte_unused,
- unsigned length __rte_unused, char *arg __rte_unused)
+parse_key(struct l2fwd_key *key __rte_unused, unsigned length __rte_unused,
+ char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
return -1;
@@ -759,26 +776,20 @@ parse_key(struct rte_crypto_sym_key *key __rte_unused,
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- if (strcmp("SHA1", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA1;
+ if (strcmp("MD5_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_MD5_HMAC;
return 0;
} else if (strcmp("SHA1_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
return 0;
- } else if (strcmp("SHA224", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA224;
- return 0;
} else if (strcmp("SHA224_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
return 0;
- } else if (strcmp("SHA256", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
- return 0;
} else if (strcmp("SHA256_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
return 0;
- } else if (strcmp("SHA512", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
+ } else if (strcmp("SHA384_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
return 0;
} else if (strcmp("SHA512_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
@@ -809,7 +820,7 @@ static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
struct option *lgopts, int option_index)
{
- if (strcmp(lgopts[option_index].name, "cdev_type") == 0)
+ if (strcmp(lgopts[option_index].name, "cdev") == 0)
return parse_cryptodev_type(&options->cdev_type, optarg);
else if (strcmp(lgopts[option_index].name, "chain") == 0)
@@ -824,11 +835,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_cipher_op(&options->cipher_xform.cipher.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "cipher_key") == 0)
- return parse_key(&options->cipher_xform.cipher.key,
- sizeof(options->ckey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->ckey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "iv") == 0)
+ options->cipher_xform.cipher.key.data = key.data;
+ options->cipher_xform.cipher.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "iv") == 0)
return parse_key(&options->iv_key, sizeof(options->ivkey_data),
optarg);
@@ -841,11 +858,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_auth_op(&options->auth_xform.auth.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
- return parse_key(&options->auth_xform.auth.key,
- sizeof(options->akey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->akey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
+ options->auth_xform.auth.key.data = key.data;
+ options->auth_xform.auth.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
options->sessionless = 1;
return 0;
}
@@ -905,16 +928,16 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
const char *q_arg)
{
char *end = NULL;
- long int n;
+ unsigned long n;
/* parse number string */
- n = strtol(q_arg, &end, 10);
+ n = strtoul(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
n = 0;
if (n >= MAX_TIMER_PERIOD) {
- printf("Warning refresh period specified %ld is greater than "
- "max value %d! using max value",
+ printf("Warning refresh period specified %lu is greater than "
+ "max value %lu! using max value",
n, MAX_TIMER_PERIOD);
n = MAX_TIMER_PERIOD;
}
@@ -934,13 +957,13 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->nb_ports_per_lcore = 1;
options->refresh_period = 10000;
options->single_lcore = 0;
+ options->sessionless = 0;
options->cdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
- options->sessionless = 0;
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
options->cipher_xform.next = NULL;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -949,12 +972,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->ckey_data, sizeof(options->ckey_data));
options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
options->cipher_xform.cipher.key.length = 16;
/* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
options->auth_xform.next = NULL;
options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -966,7 +988,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->akey_data, sizeof(options->akey_data));
options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
options->auth_xform.auth.key.length = 20;
}
@@ -993,39 +1014,6 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
printf("sessionless crypto: %s\n",
options->sessionless ? "enabled" : "disabled");
-#if 0
- options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
-
- /* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
- options->cipher_xform.next = NULL;
-
- options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- generate_random_key(options->ckey_data, sizeof(options->ckey_data));
-
- options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
- options->cipher_xform.cipher.key.length = 16;
-
-
- /* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
- options->auth_xform.next = NULL;
-
- options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
-
- options->auth_xform.auth.add_auth_data_length = 0;
- options->auth_xform.auth.digest_length = 20;
-
- generate_random_key(options->akey_data, sizeof(options->akey_data));
-
- options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
- options->auth_xform.auth.key.length = 20;
-#endif
}
/* Parse the argument given in the command line of the application */
@@ -1039,7 +1027,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
static struct option lgopts[] = {
{ "sessionless", no_argument, 0, 0 },
- { "cdev_type", required_argument, 0, 0 },
+ { "cdev", required_argument, 0, 0 },
{ "chain", required_argument, 0, 0 },
{ "cipher_algo", required_argument, 0, 0 },
@@ -1053,6 +1041,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "iv", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -1370,15 +1359,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
diff --git a/lib/Makefile b/lib/Makefile
index ef172ea..4c5c1b4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index df0c0b8..e0c4fb9 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -44,7 +44,369 @@
extern "C" {
#endif
-#include <rte_crypto_sym.h>
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_crypto_sym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ /**< Undefined operation type */
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ /**< Symmetric operation */
+};
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by a crypto device */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**<
+ * Symmetric operation failed due to invalid session arguments, or if
+ * in session-less mode, failed to allocate private operation material.
+ */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API, PMDs should check the type parameter to
+ * verify that the operation is a support function of the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() .
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+ * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+
+ struct rte_mempool *mempool;
+ /**< crypto operation mempool which operation is allocated from */
+
+ phys_addr_t phys_addr;
+ /**< physical address of crypto operation */
+
+ void *opaque_data;
+ /**< Opaque pointer for user data */
+
+ union {
+ struct rte_crypto_sym_op *sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+} __rte_cache_aligned;
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->type = type;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ /** Symmetric operation structure starts after the end of the
+ * rte_crypto_op structure.
+ */
+ op->sym = (struct rte_crypto_sym_op *)(op + 1);
+ op->type = type;
+
+ __rte_crypto_sym_op_reset(op->sym);
+ break;
+ default:
+ break;
+ }
+
+ op->opaque_data = NULL;
+}
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ /**< Crypto op pool type operation. */
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Bulk allocate raw element from mempool and return as crypto operations
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns number of ops allocated
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ priv = rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != type &&
+ priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+ return -EINVAL;
+
+ if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+ return nb_ops;
+
+ return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = NULL;
+ int retval;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+ if (unlikely(retval != 1))
+ return NULL;
+
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+ != nb_ops))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_crypto_op_reset(ops[i], type);
+
+ return nb_ops;
+}
+
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+
+ return NULL;
+}
+
+/**
+ * free crypto operation structure
+ * If operation has been allocate from a rte_mempool, then the operation will
+ * be returned to the mempool.
+ *
+ * @param op symmetric crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf which is associated with the crypto operation, the
+ * operation will be allocated in the private data of that
+ * mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /*
+ * check that the mbuf's private data size is sufficient to contain a
+ * crypto operation
+ */
+ if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op))))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym->m_src = m;
+
+ return op;
+}
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ void *priv_data;
+ uint32_t size;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return NULL;
+
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+ nb_xforms);
+}
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param sess cryptodev session
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -1;
+
+ return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index cb2b8f6..831bbf1 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -46,6 +46,8 @@
extern "C" {
#endif
+#include <string.h>
+
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -346,37 +348,24 @@ enum rte_crypto_sym_op_sess_type {
};
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
+struct rte_cryptodev_sym_session;
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
+ *
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
- * call for performing cipher, hash, or a combined hash and cipher operations.
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
+ * which contains the source data which the crypto operation is to be performed
+ * on.
*/
struct rte_crypto_sym_op {
- enum rte_crypto_sym_op_sess_type type;
- enum rte_crypto_op_status status;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ enum rte_crypto_sym_op_sess_type type;
union {
struct rte_cryptodev_sym_session *session;
@@ -387,7 +376,7 @@ struct rte_crypto_sym_op {
struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -395,7 +384,7 @@ struct rte_crypto_sym_op {
* this location.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -414,17 +403,68 @@ struct rte_crypto_sym_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The field @ref aad field
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -432,180 +472,169 @@ struct rte_crypto_sym_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+ * ignored. The field @ref aad field should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_sym_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+ * rte_cryptodev_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint16_t length; /**< Length of additional authenticated data */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
+} __rte_cache_aligned;
/**
- * Reset the fields of a crypto operation to their default values.
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
+ memset(op, 0, sizeof(*op));
+
op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
}
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+ void *priv_data, uint8_t nb_xforms)
+{
+ struct rte_crypto_sym_xform *xform;
+
+ sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param op crypto operation
+ * @param sess cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
struct rte_cryptodev_sym_session *sess)
{
- op->session = sess;
- op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+ sym_op->session = sess;
+ sym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+ return 0;
}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index c7fef6a..4632ca3 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1056,3 +1056,79 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
return NULL;
}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ unsigned elt_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ priv_size;
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->priv_size < priv_size) {
+ mp = NULL;
+ CDEV_LOG_ERR("Mempool %s already exists but with "
+ "incompatible parameters", name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ CDEV_LOG_ERR("Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 732e2b9..b63cb57 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -48,8 +48,6 @@
extern "C" {
#endif
-#include "stddef.h"
-
#include "rte_crypto.h"
#include "rte_dev.h"
@@ -58,15 +56,18 @@ extern "C" {
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
-/**< Intel QAT SYM PMD device name */
+/**< Intel QAT Symmetric Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
+
+extern const char **rte_cyptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -411,12 +412,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -489,66 +490,65 @@ struct rte_cryptodev_data {
extern struct rte_cryptodev *rte_cryptodevs;
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operation are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
/**
- * Enqueue a burst of packets for processing on a crypto device.
- *
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * Enqueue a burst of operations for processing on a crypto device.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all packets have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -556,25 +556,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto devices queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
@@ -620,7 +620,6 @@ extern struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(uint8_t dev_id,
struct rte_crypto_sym_xform *xform);
-
/**
* Free the memory associated with a previously allocated session.
*
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..39cd9ec 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -1,4 +1,4 @@
-DPDK_2.2 {
+DPDK_16.04 {
global:
rte_cryptodevs;
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
};
\ No newline at end of file
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index c973e9b..400a849 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
diff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile
deleted file mode 100644
index acdb449..0000000
--- a/lib/librte_mbuf_offload/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c
deleted file mode 100644
index 5c0c9dd..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
deleted file mode 100644
index 5ce6058..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operations
- * parameter's inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO_SYM
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_sym_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbug offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- (struct rte_pktmbuf_offload_pool_private *)rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success retruns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload strucutre to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO_SYM:
- __rte_crypto_sym_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_sym_xform *
-rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_sym_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
deleted file mode 100644
index 3d3b06a..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v4 0/2] cryptodev API changes
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
@ 2016-02-29 17:47 ` Trahe, Fiona
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 " Fiona Trahe
` (6 subsequent siblings)
9 siblings, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-02-29 17:47 UTC (permalink / raw)
To: Doherty, Declan, dev
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Declan Doherty
> Sent: Monday, February 29, 2016 4:52 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v4 0/2] cryptodev API changes
>
> This patch set separates the symmetric crypto operations from generic
> operations and then modifies the cryptodev burst API to accept bursts of
> rte_crypto_op rather than rte_mbufs.
>
> V4:
> - Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
> - Typo fix in cached attribute on rte_crypto_op structure.
>
> V3:
> - Addresses V2 comments
> - Rebased for head
>
>
> Declan Doherty (1):
> cryptodev: change burst API to be crypto op oriented
>
> Fiona Trahe (1):
> cryptodev: API tidy and changes to support future extensions
>
> MAINTAINERS | 6 +-
> app/test/test_cryptodev.c | 894 +++++++++++----------
> app/test/test_cryptodev.h | 9 +-
> app/test/test_cryptodev_perf.c | 270 ++++---
> config/common_bsdapp | 8 -
> config/common_linuxapp | 8 -
> doc/api/doxy-api-index.md | 1 -
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
> drivers/crypto/qat/qat_crypto.c | 150 ++--
> drivers/crypto/qat/qat_crypto.h | 14 +-
> drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
> examples/l2fwd-crypto/main.c | 300 ++++---
> lib/Makefile | 1 -
> lib/librte_cryptodev/Makefile | 1 +
> lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
> lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
> lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
> lib/librte_cryptodev/rte_cryptodev.h | 185 ++---
> lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
> lib/librte_cryptodev/rte_cryptodev_version.map | 3 +-
> lib/librte_mbuf/rte_mbuf.h | 6 -
> lib/librte_mbuf_offload/Makefile | 52 --
> lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
> lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
> .../rte_mbuf_offload_version.map | 7 -
> 27 files changed, 2143 insertions(+), 2021 deletions(-) create mode 100644
> lib/librte_cryptodev/rte_crypto_sym.h
> delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644
> lib/librte_mbuf_offload/rte_mbuf_offload.c
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
>
> --
> 2.5.0
Series Acked-by: Fiona Trahe <fiona.trahe@intel.com>
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/2] cryptodev: API tidy and changes to support future extensions
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
@ 2016-03-04 14:43 ` Thomas Monjalon
0 siblings, 0 replies; 62+ messages in thread
From: Thomas Monjalon @ 2016-03-04 14:43 UTC (permalink / raw)
To: Declan Doherty; +Cc: dev
2016-02-29 16:52, Declan Doherty:
> From: Fiona Trahe <fiona.trahe@intel.com>
>
> This patch splits symmetric specific definitions and functions away from the
> common crypto APIs to facilitate the future extension and expansion of the
> cryptodev framework, in order to allow asymmetric crypto operations to be
> introduced at a later date, as well as to clean the logical structure of the
> public includes. The patch also introduces the _sym prefix to symmetric
> specific structure and functions to improve clarity in the API.
>
> Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Linking test application fails because you forgot renaming the symbols in .map.
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
` (2 preceding siblings ...)
2016-02-29 17:47 ` [dpdk-dev] [PATCH v4 0/2] cryptodev API changes Trahe, Fiona
@ 2016-03-04 17:17 ` Fiona Trahe
2016-03-04 17:38 ` Thomas Monjalon
` (2 more replies)
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 1/2] This patch splits symmetric specific definitions and functions away from the common crypto APIs to facilitate the future extension and expansion of the cryptodev framework, in order to allow asymmetric crypto operations to be introduced at a later date, as well as to clean the logical structure of the public includes. The patch also introduces the _sym prefix to symmetric specific structure and functions to improve clarity in the API Fiona Trahe
` (5 subsequent siblings)
9 siblings, 3 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-04 17:17 UTC (permalink / raw)
To: dev
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 6 +-
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 8 -
config/common_linuxapp | 8 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2145 insertions(+), 2027 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v5 1/2] This patch splits symmetric specific definitions and functions away from the common crypto APIs to facilitate the future extension and expansion of the cryptodev framework, in order to allow asymmetric crypto operations to be introduced at a later date, as well as to clean the logical structure of the public includes. The patch also introduces the _sym prefix to symmetric specific structure and functions to improve clarity in the API.
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
` (3 preceding siblings ...)
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 " Fiona Trahe
@ 2016-03-04 17:17 ` Fiona Trahe
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 2/2] This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts rte_crypto_op's rather than the current implementation which operates on rte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the use of crypto operations in general Fiona Trahe
` (4 subsequent siblings)
9 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-04 17:17 UTC (permalink / raw)
To: dev
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
app/test/test_cryptodev.c | 164 +++---
app/test/test_cryptodev_perf.c | 79 +--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 44 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 6 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 51 +-
drivers/crypto/qat/qat_crypto.h | 10 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 33 +-
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 563 +------------------
lib/librte_cryptodev/rte_crypto_sym.h | 613 +++++++++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 39 +-
lib/librte_cryptodev/rte_cryptodev.h | 80 ++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 6 +-
lib/librte_mbuf_offload/rte_mbuf_offload.h | 22 +-
17 files changed, 915 insertions(+), 840 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 62f8fb0..951b443 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,13 +57,13 @@ struct crypto_testsuite_params {
};
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
struct rte_mbuf_offload *ol;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -78,7 +78,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_param);
@@ -165,7 +165,8 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -220,7 +221,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
@@ -275,7 +276,7 @@ ut_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
ts_params->conf.session_mp.nb_objs =
- (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+ (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
DEFAULT_NUM_OPS_INFLIGHT :
DEFAULT_NUM_OPS_INFLIGHT;
@@ -319,7 +320,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
ut_params->sess);
ut_params->sess = NULL;
}
@@ -464,7 +465,7 @@ test_queue_pair_descriptor_setup(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf), "Failed to configure cryptodev %u",
@@ -766,7 +767,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -776,7 +777,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -786,20 +787,21 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -862,18 +864,18 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+ TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
ut_params->ol, 2),
"failed to allocate space for crypto transforms");
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -882,7 +884,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -958,7 +960,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA1);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -967,7 +969,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -977,13 +979,14 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -991,7 +994,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1066,7 +1069,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1075,7 +1078,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1085,13 +1088,14 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1099,7 +1103,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1168,7 +1172,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA256);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1177,7 +1181,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1187,13 +1191,14 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1201,7 +1206,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1282,7 +1287,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1291,7 +1296,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1301,15 +1306,16 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1317,7 +1323,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1371,7 +1377,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
@@ -1386,8 +1392,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
"Failed to create session params");
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
@@ -1400,7 +1407,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
{
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1409,7 +1416,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1422,7 +1429,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params)
{
@@ -1442,7 +1449,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1450,7 +1457,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1522,7 +1529,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1531,7 +1538,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1541,13 +1548,14 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1555,7 +1563,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)
rte_pktmbuf_prepend(ut_params->ibuf,
@@ -1615,7 +1623,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_AES_XCBC);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1624,7 +1632,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1634,13 +1642,14 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1648,7 +1657,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
@@ -1752,7 +1761,7 @@ test_multi_session(void)
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_cryptodev_info dev_info;
- struct rte_cryptodev_session **sessions;
+ struct rte_cryptodev_sym_session **sessions;
uint16_t i;
@@ -1761,12 +1770,13 @@ test_multi_session(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
- dev_info.max_nb_sessions) + 1, 0);
+ sessions = rte_malloc(NULL,
+ (sizeof(struct rte_cryptodev_sym_session *) *
+ dev_info.sym.max_nb_sessions) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.max_nb_sessions; i++) {
- sessions[i] = rte_cryptodev_session_create(
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(sessions[i],
@@ -1781,13 +1791,13 @@ test_multi_session(void)
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.max_nb_sessions; i++)
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
sessions[i]);
rte_free(sessions);
@@ -1806,7 +1816,7 @@ test_not_in_place_crypto(void)
/* Create multiple crypto sessions*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0], &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1828,7 +1838,7 @@ test_not_in_place_crypto(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1836,7 +1846,7 @@ test_not_in_place_crypto(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1962,7 +1972,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static struct test_command cryptodev_qat_cmd = {
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 728bcf0..42dd9bc 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -63,12 +63,12 @@ struct crypto_testsuite_params {
#define MAX_NUM_OF_OPS_PER_UT (128)
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
@@ -127,7 +127,7 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -179,7 +179,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
@@ -252,7 +252,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess)
- rte_cryptodev_session_free(ts_params->dev_id,
+ rte_cryptodev_sym_session_free(ts_params->dev_id,
ut_params->sess);
/* free crypto operation structure */
@@ -1713,7 +1713,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1723,7 +1723,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1733,7 +1733,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1741,7 +1741,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < num_to_submit ; b++) {
tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ (const char *)data_params[0].expected.ciphertext,
data_params[0].length, 0);
TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
@@ -1753,12 +1753,12 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
DIGEST_BYTE_LENGTH_SHA256);
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+ ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
@@ -1814,7 +1814,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1824,10 +1825,13 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
total_cycles += end_cycles - start_cycles;
}
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1876,7 +1880,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1885,7 +1889,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1895,7 +1899,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1928,12 +1932,12 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1957,15 +1961,19 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, tx_mbufs,
+ ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
+ < DEFAULT_BURST_SIZE) ?
+ DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
+ DEFAULT_BURST_SIZE);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1973,10 +1981,13 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num, 0,
rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1987,7 +1998,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
mmps = (double)num_received*mhz/(end_cycles - start_cycles);
throughput = mmps*data_params[index].length*8;
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
+ data_params[index].length, num_sent, num_received);
printf("\t%.2f\t%u", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
@@ -2044,7 +2055,7 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index c7d884c..61d93cd 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -109,7 +109,7 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
@@ -119,12 +119,12 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
@@ -134,11 +134,11 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
@@ -196,11 +196,11 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
@@ -259,10 +259,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,11 +296,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
@@ -313,7 +313,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
return NULL;
sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
sess, crypto_op->xform) != 0)) {
@@ -339,7 +339,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
@@ -432,14 +433,14 @@ static struct rte_mbuf *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -459,7 +460,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
@@ -511,7 +512,8 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
qp->qp_stats.enqueue_err_count++;
goto flush_jobs;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index e43b898..3cd9990 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -108,7 +108,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->dev_type = dev->dev_type;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->max_nb_sessions = internals->max_nb_sessions;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
}
}
@@ -243,7 +243,7 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
/** Configure a aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform, void *sess)
{
struct aesni_mb_private *internals = dev->data->dev_private;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..ab70c15 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -218,7 +218,7 @@ struct aesni_mb_session {
extern int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform);
+ const struct rte_crypto_sym_xform *xform);
/** device specific operations function pointer structure */
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 828756b..e7b9027 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,37 +89,37 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
}
static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
if (xform->next == NULL)
return -1;
/* Cipher Only */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
/* Authentication Only */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
/* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
/* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
return -1;
}
static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return &xform->auth;
xform = xform->next;
@@ -129,10 +129,10 @@ qat_get_auth_xform(struct rte_crypto_xform *xform)
}
static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return &xform->cipher;
xform = xform->next;
@@ -144,7 +144,7 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform)
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -261,7 +261,7 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
auth_xform->digest_length))
goto error_out;
- return (struct rte_cryptodev_session *)session;
+ return (struct rte_crypto_sym_session *)session;
error_out:
rte_mempool_put(internals->sess_mp, session);
@@ -275,7 +275,7 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
register struct qat_queue *queue;
@@ -327,7 +327,8 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct rte_mbuf_offload *ol;
struct qat_queue *queue;
@@ -343,12 +344,13 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_pkts) {
rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+ ol = rte_pktmbuf_offload_get(rx_mbuf,
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ ol->op.crypto.status =
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
@@ -384,20 +386,21 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
"to (%p) mbuf.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests mbuf (%p) is sessionless.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(ol->op.crypto.session->type
+ != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
@@ -520,8 +523,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
- info->max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..e9f71fe 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,16 +109,18 @@ qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
extern void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform, void *session_private);
extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..85700fc 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,11 +89,11 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->pci_dev->addr.devid,
cryptodev->pci_dev->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index a950b74..93de786 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,13 +124,13 @@ struct l2fwd_crypto_options {
enum l2fwd_crypto_xform_chain xform_chain;
- struct rte_crypto_xform cipher_xform;
+ struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_key iv_key;
+ struct rte_crypto_sym_key iv_key;
uint8_t ivkey_data[16];
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform auth_xform;
uint8_t akey_data[128];
};
@@ -142,8 +142,11 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct rte_crypto_key iv_key;
- struct rte_cryptodev_session *session;
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv;
+ struct rte_cryptodev_sym_session *session;
};
/** lcore configuration */
@@ -372,7 +375,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
/* Append space for digest to end of packet */
ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -474,11 +477,11 @@ generate_random_key(uint8_t *key, unsigned length)
key[i] = rand() % 0xff;
}
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
uint8_t cdev_id)
{
- struct rte_crypto_xform *first_xform;
+ struct rte_crypto_sym_xform *first_xform;
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
@@ -489,7 +492,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options,
}
/* Setup Cipher Parameters */
- return rte_cryptodev_session_create(cdev_id, first_xform);
+ return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}
static void
@@ -610,7 +613,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
m = pkts_burst[j];
ol = rte_pktmbuf_offload_alloc(
l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
/*
* If we can't allocate a offload, then drop
* the rest of the burst and dequeue and
@@ -689,7 +692,7 @@ parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg)
*type = RTE_CRYPTODEV_AESNI_MB_PMD;
return 0;
} else if (strcmp("QAT", optarg) == 0) {
- *type = RTE_CRYPTODEV_QAT_PMD;
+ *type = RTE_CRYPTODEV_QAT_SYM_PMD;
return 0;
}
@@ -745,7 +748,7 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_key *key __rte_unused,
+parse_key(struct rte_crypto_sym_key *key __rte_unused,
unsigned length __rte_unused, char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
@@ -982,7 +985,7 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
switch (options->cdev_type) {
case RTE_CRYPTODEV_AESNI_MB_PMD:
printf("cryptodev type: AES-NI MB PMD\n"); break;
- case RTE_CRYPTODEV_QAT_PMD:
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
printf("cryptodev type: QAT PMD\n"); break;
default:
break;
@@ -1179,7 +1182,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports)
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
int retval;
- if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+ if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
if (rte_cryptodev_count() < nb_ports)
return -1;
} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 81fa3fc..0d59229 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -45,6 +45,7 @@ SRCS-y += rte_cryptodev.c
# export include files
SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..df0c0b8 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,572 +36,15 @@
/**
* @file rte_crypto.h
*
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
*
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
- RTE_CRYPTO_CIPHER_NULL = 1,
- /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
- RTE_CRYPTO_CIPHER_3DES_CBC,
- /**< Triple DES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_3DES_CTR,
- /**< Triple DES algorithm in CTR mode */
- RTE_CRYPTO_CIPHER_3DES_ECB,
- /**< Triple DES algorithm in ECB mode */
-
- RTE_CRYPTO_CIPHER_AES_CBC,
- /**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
- RTE_CRYPTO_CIPHER_AES_CTR,
- /**< AES algorithm in Counter mode */
- RTE_CRYPTO_CIPHER_AES_ECB,
- /**< AES algorithm in ECB mode */
- RTE_CRYPTO_CIPHER_AES_F8,
- /**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* element of the
- * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_setup_data* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation.
- */
- RTE_CRYPTO_CIPHER_AES_XTS,
- /**< AES algorithm in XTS mode */
-
- RTE_CRYPTO_CIPHER_ARC4,
- /**< (A)RC4 cipher algorithm */
-
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- /**< Kasumi algorithm in F8 mode */
-
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- /**< SNOW3G algorithm in UEA2 mode */
-
- RTE_CRYPTO_CIPHER_ZUC_EEA3
- /**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- /**< Encrypt cipher operation */
- RTE_CRYPTO_CIPHER_OP_DECRYPT
- /**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- * use to create a session.
- */
-struct rte_crypto_cipher_xform {
- enum rte_crypto_cipher_operation op;
- /**< This parameter determines if the cipher operation is an encrypt or
- * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
- * only encrypt operations are valid.
- */
- enum rte_crypto_cipher_algorithm algo;
- /**< Cipher algorithm */
-
- struct rte_crypto_key key;
- /**< Cipher key
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
- * point to a concatenation of the AES encryption key followed by a
- * keymask. As per RFC3711, the keymask should be padded with trailing
- * bytes to match the length of the encryption key used.
- *
- * For AES-XTS mode of operation, two keys must be provided and
- * key.data must point to the two keys concatenated together (Key1 ||
- * Key2). The cipher key length will contain the total size of both
- * keys.
- *
- * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
- * 192 bits (24 bytes) or 256 bits (32 bytes).
- *
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
- * should be set to the combined length of the encryption key and the
- * keymask. Since the keymask and the encryption key are the same size,
- * key.length should be set to 2 x the AES encryption key length.
- *
- * For the AES-XTS mode of operation:
- * - Two keys must be provided and key.length refers to total length of
- * the two keys.
- * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size.
- **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
- RTE_CRYPTO_AUTH_NULL = 1,
- /**< NULL hash algorithm. */
-
- RTE_CRYPTO_AUTH_AES_CBC_MAC,
- /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_CMAC,
- /**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- /**< AES XCBC algorithm. */
-
- RTE_CRYPTO_AUTH_KASUMI_F9,
- /**< Kasumi algorithm in F9 mode. */
-
- RTE_CRYPTO_AUTH_MD5,
- /**< MD5 algorithm */
- RTE_CRYPTO_AUTH_MD5_HMAC,
- /**< HMAC using MD5 algorithm */
-
- RTE_CRYPTO_AUTH_SHA1,
- /**< 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA1_HMAC,
- /**< HMAC using 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224,
- /**< 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224_HMAC,
- /**< HMAC using 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256,
- /**< 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256_HMAC,
- /**< HMAC using 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384,
- /**< 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384_HMAC,
- /**< HMAC using 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512,
- /**< 512 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512_HMAC,
- /**< HMAC using 512 bit SHA algorithm. */
-
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- /**< SNOW3G algorithm in UIA2 mode. */
-
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- /**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
- RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
- RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
- enum rte_crypto_auth_operation op;
- /**< Authentication operation type */
- enum rte_crypto_auth_algorithm algo;
- /**< Authentication algorithm selection */
-
- struct rte_crypto_key key; /**< Authentication key data.
- * The authentication key length MUST be less than or equal to the
- * block size of the algorithm. It is the callers responsibility to
- * ensure that the key length is compliant with the standard being used
- * (for example RFC 2104, FIPS 198a).
- */
-
- uint32_t digest_length;
- /**< Length of the digest to be returned. If the verify option is set,
- * this specifies the length of the digest to be compared for the
- * session.
- *
- * If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_session_create* or by the
- * *rte_cryptodev_enqueue_burst* if using session-less APIs.
- */
-
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in the message length to hash field of
- * the rte_crypto_op_data structure.
- */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
- RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
- RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
- struct rte_crypto_xform *next; /**< next xform in chain */
-
- enum rte_crypto_xform_type type; /**< xform type */
- union {
- struct rte_crypto_auth_xform auth;
- /**< Authentication / hash xform */
- struct rte_crypto_cipher_xform cipher;
- /**< Cipher xform */
- };
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
- RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
-
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
- enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
-
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
-
- union {
- struct rte_cryptodev_session *session;
- /**< Handle for the initialised session context */
- struct rte_crypto_xform *xform;
- /**< Session-less API crypto operation parameters */
- };
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- */
- } to_cipher; /**< Data offsets and length for ciphering */
-
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field specifies the start
- * of the AAD data in the source buffer.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field specifies the length of
- * the AAD data in the source buffer.
- */
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
-
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param op The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
- op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
- struct rte_cryptodev_session *sess)
-{
- op->session = sess;
- op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 0000000..cb2b8f6
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,613 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CCM,
+ /**< AES algorithm in CCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation
+ */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_GCM,
+ /**< AES algorithm in GCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< Kasumi algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ /**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * used to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * For AES-XTS mode of operation, two keys must be provided and
+ * key.data must point to the two keys concatenated together (Key1 ||
+ * Key2). The cipher key length will contain the total size of both
+ * keys.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the CCM mode of operation, the only supported key length is 128
+ * bits (16 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ **/
+};
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CCM,
+ /**< AES algorithm in CCM mode. This is an authenticated cipher. When
+ * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+ * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+ * set up the related rte_crypto_cipher_xform structure in the
+ * session context or the corresponding parameter in the crypto
+ * operation data structures op_params parameter MUST be set for a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GCM,
+ /**< AES algorithm in GCM mode. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< Kasumi algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transforms. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the callers responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ uint32_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated. If the value is greater than the
+ * maximum length allowed by the hash then an error will be generated
+ * by *rte_cryptodev_sym_session_create* or by the
+ * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ */
+
+ uint32_t add_auth_data_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * The maximum permitted value is 240 bytes, unless otherwise specified
+ * below.
+ *
+ * This field must be specified when the hash algorithm is one of the
+ * following:
+ *
+ * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+ * length of the IV (which should be 16).
+ *
+ * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+ * the length of the Additional Authenticated Data (called A, in NIST
+ * SP800-38D).
+ *
+ * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+ * the length of the associated data (called A, in NIST SP800-38C).
+ * Note that this does NOT include the length of any padding, or the
+ * 18 bytes reserved at the start of the above field to store the
+ * block B0 and the encoded length. The maximum permitted value in
+ * this case is 222 bytes.
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+ * this field is not used and should be set to 0. Instead the length
+ * of the AAD data is specified in the message length to hash field of
+ * the rte_crypto_sym_op structure.
+ */
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required, multiple transforms
+ * can be chained together to specify a chain transforms such as authentication
+ * then cipher, or cipher then authentication. Each transform structure can
+ * hold a single transform, the type field is used to specify which transform
+ * is contained within the union
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type;
+ /**< xform type */
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ };
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+ RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
+ /**< Operation not yet submitted to a cryptodev */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
+ * call for performing cipher, hash, or a combined hash and cipher operations.
+ */
+struct rte_crypto_sym_op {
+ enum rte_crypto_sym_op_sess_type type;
+ enum rte_crypto_op_status status;
+
+ struct {
+ struct rte_mbuf *m; /**< Destination mbuf */
+ uint8_t offset; /**< Data offset */
+ } dst;
+
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing, specified
+ * as number of bytes from start of data in the source
+ * buffer. The result of the cipher operation will be
+ * written back into the output buffer starting at
+ * this location.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ * if a block cipher is being used. This is also the
+ * same as the result length.
+ *
+ * @note
+ * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+ * this value should not include the length of the
+ * padding or the length of the MAC; the driver will
+ * compute the actual number of bytes over which the
+ * encryption will occur, which will include these
+ * values.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+ * field should be set to 0.
+ */
+ } to_cipher; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The field @ref additional_auth field
+ * should be set instead.
+ *
+ * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+ * mode of operation, this field specifies the start
+ * of the AAD data in the source buffer.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The field @ref additional_auth field
+ * should be set instead.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+ * of operation, this field specifies the length of
+ * the AAD data in the source buffer.
+ */
+ } to_hash; /**< Data offsets and length for authentication */
+ } data; /**< Details of data to be operated on */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+ * Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length is 96
+ * bits) or J0 (for other sizes), where J0 is as defined by
+ * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+ * needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the nonce
+ * should be written starting at &iv[1] (to allow space for the
+ * implementation to write in the flags in the first byte).
+ * Note that a full 16 bytes should be allocated, even though
+ * the length field will have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+ * 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD be
+ * 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ size_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the length of the
+ * IV (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length of the
+ * counter (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+ * which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce, which can
+ * be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result should be
+ * inserted (in the case of digest generation) or where the
+ * purported digest exists (in the case of digest
+ * verification).
+ *
+ * At session creation time, the client specified the digest
+ * result length with the digest_length member of the @ref
+ * rte_crypto_auth_xform structure. For physical crypto
+ * devices the caller must allocate at least digest_length of
+ * physically contiguous memory at this location.
+ *
+ * For digest generation, the digest result will overwrite
+ * any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is understood
+ * to be in the destination buffer for digest generation, and
+ * in the source buffer for digest verification. The location
+ * of the digest result in this case is immediately following
+ * the region over which the digest is computed.
+ */
+ phys_addr_t phys_addr; /**< Physical address of digest */
+ uint32_t length; /**< Length of digest */
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD) needed for
+ * authenticated cipher mechanisms (CCM and GCM), and to the IV
+ * for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is set up
+ * for the session in the @ref rte_crypto_auth_xform structure
+ * as part of the @ref rte_cryptodev_sym_session_create function
+ * call. This length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+ * caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset of one
+ * byte into the array, leaving room for the implementation
+ * to write in the flags to the first byte.
+ *
+ * - the additional authentication data itself should be
+ * written starting at an offset of 18 bytes into the array,
+ * leaving room for the length encoding in the first two
+ * bytes of the second block.
+ *
+ * - the array should be big enough to hold the above fields,
+ * plus any padding to round this up to the nearest multiple
+ * of the block size (16 bytes). Padding will be added by
+ * the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set to 0.
+ * Instead the AAD data should be placed in the source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint32_t length; /**< Length of additional authenticated data */
+ } additional_auth;
+ /**< Additional authentication parameters */
+
+ struct rte_mempool *pool;
+ /**< mempool used to allocate crypto op */
+
+ void *user_data;
+ /**< opaque pointer for user data */
+};
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+ op->dst.m = NULL;
+ op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ op->session = sess;
+ op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 2838852..c7fef6a 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -572,8 +572,8 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
@@ -604,8 +604,10 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
- config->session_mp.cache_size, config->socket_id);
+ return rte_cryptodev_sym_session_pool_create(dev,
+ config->session_mp.nb_objs,
+ config->session_mp.cache_size,
+ config->socket_id);
}
@@ -911,12 +913,12 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
void *opaque_arg,
void *_sess,
__rte_unused unsigned i)
{
- struct rte_cryptodev_session *sess = _sess;
+ struct rte_cryptodev_sym_session *sess = _sess;
struct rte_cryptodev *dev = opaque_arg;
memset(sess, 0, mp->elt_size);
@@ -930,8 +932,8 @@ rte_crypto_session_init(struct rte_mempool *mp,
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned priv_sess_size;
@@ -951,7 +953,7 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return -ENOMEM;
}
- unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+ unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
priv_sess_size;
dev->data->session_pool = rte_mempool_lookup(mp_name);
@@ -975,7 +977,8 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
0, /* private data size */
NULL, /* obj initialization constructor */
NULL, /* obj initialization constructor arg */
- rte_crypto_session_init, /* obj constructor */
+ rte_cryptodev_sym_session_init,
+ /**< obj constructor*/
dev, /* obj constructor arg */
socket_id, /* socket id */
0); /* flags */
@@ -990,11 +993,12 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return 0;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
void *_sess;
if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
@@ -1010,7 +1014,7 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return NULL;
}
- sess = (struct rte_cryptodev_session *)_sess;
+ sess = (struct rte_cryptodev_sym_session *)_sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1026,8 +1030,9 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return sess;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..732e2b9 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,14 +57,14 @@ extern "C" {
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
+/**< Intel QAT SYM PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
};
/* Logging Macros */
@@ -99,8 +99,11 @@ struct rte_cryptodev_info {
unsigned max_nb_queue_pairs;
/**< Maximum number of queues pairs supported by device. */
- unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device. */
+ } sym;
};
#define RTE_CRYPTODEV_DETACHED (0)
@@ -130,17 +133,6 @@ struct rte_cryptodev_qp_conf {
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
enum rte_cryptodev_event_type event, void *cb_arg);
-#ifdef RTE_CRYPTODEV_PERF
-/**
- * Crypto Device performance counter statistics structure. This structure is
- * used for RDTSC counters for measuring crypto operations.
- */
-struct rte_cryptodev_perf_stats {
- uint64_t t_accumlated; /**< Accumulated time processing operation */
- uint64_t t_min; /**< Max time */
- uint64_t t_max; /**< Min time */
-};
-#endif
/** Crypto Device statistics */
struct rte_cryptodev_stats {
@@ -153,29 +145,8 @@ struct rte_cryptodev_stats {
/**< Total error count on operations enqueued */
uint64_t dequeue_err_count;
/**< Total error count on operations dequeued */
+};
-#ifdef RTE_CRYPTODEV_DETAILED_STATS
- struct {
- uint64_t encrypt_ops; /**< Count of encrypt operations */
- uint64_t encrypt_bytes; /**< Number of bytes encrypted */
-
- uint64_t decrypt_ops; /**< Count of decrypt operations */
- uint64_t decrypt_bytes; /**< Number of bytes decrypted */
- } cipher; /**< Cipher operations stats */
-
- struct {
- uint64_t generate_ops; /**< Count of generate operations */
- uint64_t bytes_hashed; /**< Number of bytes hashed */
-
- uint64_t verify_ops; /**< Count of verify operations */
- uint64_t bytes_verified;/**< Number of bytes verified */
- } hash; /**< Hash operations stats */
-#endif
-
-#ifdef RTE_CRYPTODEV_PERF
- struct rte_cryptodev_perf_stats op_perf; /**< Operations stats */
-#endif
-} __rte_cache_aligned;
/**
* Create a virtual crypto device
@@ -607,6 +578,23 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
}
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+ struct {
+ uint8_t dev_id;
+ /**< Device Id */
+ enum rte_cryptodev_type type;
+ /**< Crypto Device type session created on */
+ struct rte_mempool *mp;
+ /**< Mempool session allocated from */
+ } __rte_aligned(8);
+ /**< Public symmetric session details */
+
+ char _private[0];
+ /**< Private session material */
+};
+
+
/**
* Initialise a session for symmetric cryptographic operations.
*
@@ -628,9 +616,9 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
* @return
* Pointer to the created session or NULL
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
- struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform);
/**
@@ -638,15 +626,15 @@ rte_cryptodev_session_create(uint8_t dev_id,
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
- * *rte_cryptodev_session_create*.
+ * *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
- struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *session);
#ifdef __cplusplus
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 8270afa..7d049ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,11 +56,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-struct rte_cryptodev_stats;
-struct rte_cryptodev_info;
-struct rte_cryptodev_qp_conf;
-
-enum rte_cryptodev_event_type;
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define RTE_PMD_DEBUG_TRACE(...) \
@@ -80,7 +75,6 @@ struct rte_cryptodev_session {
};
struct rte_cryptodev_driver;
-struct rte_cryptodev;
/**
* Initialisation function of a crypto driver invoked for each matching
@@ -364,7 +358,7 @@ typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
* - On success returns a pointer to a rte_mempool
* - On failure returns a NULL pointer
*/
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
struct rte_cryptodev *dev, unsigned nb_objs,
unsigned obj_cache_size, int socket_id);
@@ -378,7 +372,7 @@ typedef int (*cryptodev_create_session_pool_t)(
* - On success returns the size of the session structure for device
* - On failure returns 0
*/
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
@@ -392,7 +386,7 @@ typedef unsigned (*cryptodev_get_session_private_size_t)(
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
void *session_private);
/**
@@ -406,14 +400,14 @@ typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
/**
* Free Crypto session.
* @param session Cryptodev session structure to free
*/
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
@@ -427,9 +421,9 @@ struct rte_cryptodev_ops {
cryptodev_info_get_t dev_infos_get; /**< Get device info. */
cryptodev_stats_get_t stats_get;
- /**< Get generic device statistics. */
+ /**< Get device statistics. */
cryptodev_stats_reset_t stats_reset;
- /**< Reset generic device statistics. */
+ /**< Reset device statistics. */
cryptodev_queue_pair_setup_t queue_pair_setup;
/**< Set up a device queue pair. */
@@ -442,13 +436,13 @@ struct rte_cryptodev_ops {
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return private session. */
- cryptodev_initialize_session_t session_initialize;
+ cryptodev_sym_initialize_session_t session_initialize;
/**< Initialization function for private session data */
- cryptodev_configure_session_t session_configure;
+ cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
- cryptodev_free_session_t session_clear;
+ cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
};
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..a46af6f 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -16,8 +16,8 @@ DPDK_2.2 {
rte_cryptodev_pmd_driver_register;
rte_cryptodev_pmd_release_device;
rte_cryptodev_pmd_virtual_dev_init;
- rte_cryptodev_session_create;
- rte_cryptodev_session_free;
+ rte_cryptodev_sym_session_create;
+ rte_cryptodev_sym_session_free;
rte_cryptodev_socket_id;
rte_cryptodev_start;
rte_cryptodev_stats_get;
@@ -29,4 +29,4 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_stop;
local: *;
-};
\ No newline at end of file
+};
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
index 77993b6..5ce6058 100644
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,7 +67,7 @@ extern "C" {
enum rte_mbuf_ol_op_type {
RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
/**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
+ RTE_PKTMBUF_OL_CRYPTO_SYM
/**< Crypto offload operation */
};
@@ -84,7 +84,7 @@ struct rte_mbuf_offload {
enum rte_mbuf_ol_op_type type; /**< offload type */
union {
- struct rte_crypto_op crypto; /**< Crypto operation */
+ struct rte_crypto_sym_op crypto; /**< Crypto operation */
} op;
};
@@ -194,8 +194,8 @@ __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
ol->type = type;
switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
+ case RTE_PKTMBUF_OL_CRYPTO_SYM:
+ __rte_crypto_sym_op_reset(&ol->op.crypto); break;
default:
break;
}
@@ -278,24 +278,24 @@ __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
* - On success returns pointer to first crypto xform in crypto operations chain
* - On failure returns NULL
*/
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
unsigned nb_xforms)
{
- struct rte_crypto_xform *xform;
+ struct rte_crypto_sym_xform *xform;
void *priv_data;
uint16_t size;
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
if (priv_data == NULL)
return NULL;
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+ ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
} while (xform);
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v5 2/2] This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts rte_crypto_op's rather than the current implementation which operates on rte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the use of crypto operations in general.
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
` (4 preceding siblings ...)
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 1/2] This patch splits symmetric specific definitions and functions away from the common crypto APIs to facilitate the future extension and expansion of the cryptodev framework, in order to allow asymmetric crypto operations to be introduced at a later date, as well as to clean the logical structure of the public includes. The patch also introduces the _sym prefix to symmetric specific structure and functions to improve clarity in the API Fiona Trahe
@ 2016-03-04 17:17 ` Fiona Trahe
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Fiona Trahe
` (3 subsequent siblings)
9 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-04 17:17 UTC (permalink / raw)
To: dev
The changes also continue the separation of the symmetric operation parameters
from the more general operation parameters; this will simplify the integration of
asymmetric crypto operations in the future.
As well as the changes to the crypto APIs this patch adds functions for managing
rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
tests and sample application to work with the modified APIs and finally
removes the now unused rte_mbuf_offload library.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
MAINTAINERS | 6 +-
app/test/test_cryptodev.c | 804 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 253 +++----
config/common_bsdapp | 8 -
config/common_linuxapp | 8 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 171 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 2 +-
drivers/crypto/qat/qat_crypto.c | 123 ++--
drivers/crypto/qat/qat_crypto.h | 12 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 4 +-
examples/l2fwd-crypto/main.c | 283 ++++----
lib/Makefile | 1 -
lib/librte_cryptodev/rte_crypto.h | 364 +++++++++-
lib/librte_cryptodev/rte_crypto_sym.h | 379 +++++-----
lib/librte_cryptodev/rte_cryptodev.c | 76 ++
lib/librte_cryptodev/rte_cryptodev.h | 113 ++-
lib/librte_cryptodev/rte_cryptodev_version.map | 5 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 --------
.../rte_mbuf_offload_version.map | 7 -
25 files changed, 1576 insertions(+), 1533 deletions(-)
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index 628bc05..ad6b45e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -222,16 +222,12 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
F: scripts/test-null.sh
-Crypto API - EXPERIMENTAL
+Crypto API
M: Declan Doherty <declan.doherty@intel.com>
F: lib/librte_cryptodev/
F: app/test/test_cryptodev*
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 951b443..208fc14 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -48,7 +47,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,8 +61,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_mbuf_offload *ol;
- struct rte_crypto_sym_op *op;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -104,7 +102,7 @@ setup_test_string(struct rte_mempool *mpool,
return m;
}
-#if HEX_DUMP
+#ifdef HEX_DUMP
static void
hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
{
@@ -112,27 +110,29 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
-#if HEX_DUMP
+#ifdef HEX_DUMP
hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
-#if HEX_DUMP
+#ifdef HEX_DUMP
if (obuf)
hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -162,13 +162,14 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -253,10 +254,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -326,8 +326,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -680,76 +680,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -776,7 +777,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -786,59 +786,65 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ /* Create crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ /* Set crypto operation authentication parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ /* Set crypto operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -863,60 +869,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -985,42 +997,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* attach symmetric crypto session to crypto operations */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1088,47 +1106,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1197,42 +1219,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1244,7 +1271,6 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
return TEST_SUCCESS;
}
@@ -1312,43 +1338,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1367,7 +1396,6 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
DIGEST_BYTE_LENGTH_SHA512,
"Generated digest data not as expected");
-
return TEST_SUCCESS;
}
@@ -1424,6 +1452,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->auth_xform.auth.key.data = hmac_sha512_key;
ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
+
return TEST_SUCCESS;
}
@@ -1448,43 +1477,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1497,8 +1529,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
-
return TEST_SUCCESS;
}
@@ -1524,10 +1554,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
catch_22_quote, QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
@@ -1548,56 +1574,75 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Set operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ /* Set operation authentication parameters */
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->m_src,
+ CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ rte_pktmbuf_mtod_offset(
+ ut_params->op->sym->m_src, uint8_t *,
+ CIPHER_IV_LENGTH_AES_CBC +
+ QUOTE_512_BYTES),
catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC,
+ gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
+ TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC :
+ DIGEST_BYTE_LENGTH_AES_XCBC,
"Generated digest data not as expected");
return TEST_SUCCESS;
@@ -1614,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC);
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
@@ -1648,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf, QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ rte_memcpy(sym_op->auth.digest.data,
+ catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
+ DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1837,50 +1894,53 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+ sym_op->m_dst = dst_m;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
-
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 034393e..c84ba42 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -32,8 +32,6 @@
#ifndef TEST_CRYPTODEV_H_
#define TEST_CRYPTODEV_H_
-#define HEX_DUMP 0
-
#define FALSE 0
#define TRUE 1
@@ -47,8 +45,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
@@ -62,7 +61,9 @@
#define AES_XCBC_MAC_KEY_SZ (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 (12)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224 (14)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC (12)
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 42dd9bc..b3f4fd9 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -50,7 +49,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -68,8 +67,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_op *op;
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +77,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +90,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
@@ -113,23 +111,24 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -256,8 +255,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1698,11 +1697,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1739,46 +1739,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[0].length;
+
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +1793,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1814,9 +1818,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1824,15 +1827,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1846,16 +1849,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1865,11 +1861,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+ double throughput, mmps;
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1908,7 +1907,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+ "Mrps\tThroughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1918,63 +1917,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+ struct rte_crypto_op *op = rte_crypto_op_alloc(
+ ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[index].length;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)
+ rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num,
- 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
- < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
- DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -1982,37 +1988,34 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym->m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
diff --git a/config/common_bsdapp b/config/common_bsdapp
index 7df5ac6..3ac2ebd 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -301,7 +301,6 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
#
# Compile generic crypto device library
-# EXPERIMENTAL: API may change without prior notice
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
@@ -352,13 +351,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 26df137..3cb9ebe 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -315,7 +315,6 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
#
# Compile generic crypto device library
-# EXPERIMENTAL: API may change without prior notice
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
@@ -368,13 +367,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 7a91001..f626386 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 61d93cd..5b26444 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -37,7 +37,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
@@ -296,16 +295,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
void *_sess = NULL;
@@ -316,7 +315,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0)) {
+ sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -338,12 +337,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_sym_op *c_op,
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -372,49 +373,65 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL)
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void *),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ get_digest_byte_length(job->hash_alg));
+
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library currently only supports returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
@@ -429,43 +446,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_sym_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
}
- return m;
+ return op;
}
/**
@@ -481,16 +496,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_pkts, (void *)op);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -499,53 +514,53 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
JOB_AES_HMAC *job = NULL;
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i],
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+ if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ MB_LOG_ERR("PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", ops[i]);
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
-
- sess = get_session(qp, &ol->op.crypto);
+#endif
+ sess = get_session(_qp, ops[i]);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(_qp, ops[i], sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
/* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ job = (*_qp->ops->job.submit)(&_qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(_qp, job);
}
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ _qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -553,24 +568,24 @@ flush_jobs:
* If we haven't processed any jobs in submit loop, then flush jobs
* queue to stop the output stalling
*/
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
+ job = (*_qp->ops->job.flush_job)(&_qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ _qp->stats.enqueued_count += handle_completed_jobs(_qp, job);
return i;
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
unsigned nb_dequeued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ nb_dequeued = rte_ring_dequeue_burst(_qp->processed_pkts,
+ (void **)ops, nb_ops);
+ _qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3cd9990..d56de12 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index ab70c15..0aed177 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e7b9027..38dc956 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
@@ -72,7 +71,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +274,16 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,19 +294,17 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights;
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
-
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ while (nb_pkts_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
if (nb_pkts_sent == 0)
@@ -316,7 +314,7 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
nb_pkts_sent++;
- cur_tx_pkt++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -327,14 +325,13 @@ kick_tail:
}
uint16_t
-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -342,17 +339,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -361,9 +361,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -377,38 +376,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
- struct rte_mbuf_offload *ol;
-
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
return -EINVAL;
}
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+#endif
+ if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type
- != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
@@ -416,37 +413,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
* - always in place.
*/
qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
-
+ rte_pktmbuf_mtophys(op->sym->m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym->cipher.data.length;
+ cipher_param->cipher_offset = op->sym->cipher.data.offset;
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym->auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->auth_len = op->sym->auth.data.length;
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -457,9 +454,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
#endif
return 0;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index e9f71fe..9323383 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,12 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 85700fc..5e51aca 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 93de786..6d5aeeb 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -85,6 +84,7 @@
*/
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
+
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -112,6 +117,12 @@ enum l2fwd_crypto_xform_chain {
L2FWD_CRYPTO_HASH_CIPHER
};
+struct l2fwd_key {
+ uint8_t *data;
+ uint32_t length;
+ phys_addr_t phys_addr;
+};
+
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -127,7 +138,7 @@ struct l2fwd_crypto_options {
struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_sym_key iv_key;
+ struct l2fwd_key iv_key;
uint8_t ivkey_data[16];
struct rte_crypto_sym_xform auth_xform;
@@ -142,10 +153,7 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct {
- uint8_t *data;
- uint16_t length;
- } iv;
+ struct l2fwd_key iv_key;
struct rte_cryptodev_sym_session *session;
};
@@ -157,14 +165,16 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
@@ -178,7 +188,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -203,7 +213,7 @@ struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
-#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */
/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
@@ -292,20 +302,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -313,7 +324,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -321,23 +333,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -375,43 +387,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_op_attach_sym_session(op, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym->auth.digest.length = cparams->digest_length;
+
+ op->sym->auth.data.offset = ipdata_offset;
+ op->sym->auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym->cipher.iv.data = cparams->iv_key.data;
+ op->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym->cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym->cipher.data.offset = ipdata_offset;
+ op->sym->cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym->m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -433,8 +445,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -443,7 +455,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -503,6 +515,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -563,12 +577,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -597,8 +611,6 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
-
portid = qconf->rx_port_list[i];
cparams = &port_cparams[i];
@@ -608,44 +620,49 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
port_statistics[portid].rx += nb_rx;
- /* Enqueue packets from Crypto device*/
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ if (nb_rx) {
/*
- * If we can't allocate a offload, then drop
- * the rest of the burst and dequeue and
- * process the packets to free offload structs
+ * If we can't allocate crypto ops for the
+ * burst, then drop the whole burst of
+ * received packets
*/
- if (unlikely(ol == NULL)) {
- for (; j < nb_rx; j++) {
- rte_pktmbuf_free(pkts_burst[j]);
- port_statistics[portid].dropped++;
- }
- break;
+ if (rte_crypto_op_bulk_alloc(
+ l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, nb_rx) !=
+ nb_rx) {
+ for (j = 0; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+
+ nb_rx = 0;
}
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ /* Enqueue packets to Crypto device */
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m,
+ ops_burst[j], cparams);
+ }
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
-
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
+
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym->m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
@@ -748,8 +765,8 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_sym_key *key __rte_unused,
- unsigned length __rte_unused, char *arg __rte_unused)
+parse_key(struct l2fwd_key *key __rte_unused, unsigned length __rte_unused,
+ char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
return -1;
@@ -759,26 +776,20 @@ parse_key(struct rte_crypto_sym_key *key __rte_unused,
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- if (strcmp("SHA1", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA1;
+ if (strcmp("MD5_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_MD5_HMAC;
return 0;
} else if (strcmp("SHA1_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
return 0;
- } else if (strcmp("SHA224", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA224;
- return 0;
} else if (strcmp("SHA224_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
return 0;
- } else if (strcmp("SHA256", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
- return 0;
} else if (strcmp("SHA256_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
return 0;
- } else if (strcmp("SHA512", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
+ } else if (strcmp("SHA384_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
return 0;
} else if (strcmp("SHA512_HMAC", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ *algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
@@ -809,7 +820,7 @@ static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
struct option *lgopts, int option_index)
{
- if (strcmp(lgopts[option_index].name, "cdev_type") == 0)
+ if (strcmp(lgopts[option_index].name, "cdev") == 0)
return parse_cryptodev_type(&options->cdev_type, optarg);
else if (strcmp(lgopts[option_index].name, "chain") == 0)
@@ -824,11 +835,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_cipher_op(&options->cipher_xform.cipher.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "cipher_key") == 0)
- return parse_key(&options->cipher_xform.cipher.key,
- sizeof(options->ckey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->ckey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "iv") == 0)
+ options->cipher_xform.cipher.key.data = key.data;
+ options->cipher_xform.cipher.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "iv") == 0)
return parse_key(&options->iv_key, sizeof(options->ivkey_data),
optarg);
@@ -841,11 +858,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_auth_op(&options->auth_xform.auth.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
- return parse_key(&options->auth_xform.auth.key,
- sizeof(options->akey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->akey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
+ options->auth_xform.auth.key.data = key.data;
+ options->auth_xform.auth.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
options->sessionless = 1;
return 0;
}
@@ -905,16 +928,16 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
const char *q_arg)
{
char *end = NULL;
- long int n;
+ unsigned long n;
/* parse number string */
- n = strtol(q_arg, &end, 10);
+ n = strtoul(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
n = 0;
if (n >= MAX_TIMER_PERIOD) {
- printf("Warning refresh period specified %ld is greater than "
- "max value %d! using max value",
+ printf("Warning refresh period specified %lu is greater than "
+ "max value %lu! using max value",
n, MAX_TIMER_PERIOD);
n = MAX_TIMER_PERIOD;
}
@@ -934,13 +957,13 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->nb_ports_per_lcore = 1;
options->refresh_period = 10000;
options->single_lcore = 0;
+ options->sessionless = 0;
options->cdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
- options->sessionless = 0;
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
options->cipher_xform.next = NULL;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -949,12 +972,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->ckey_data, sizeof(options->ckey_data));
options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
options->cipher_xform.cipher.key.length = 16;
/* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
options->auth_xform.next = NULL;
options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -966,7 +988,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->akey_data, sizeof(options->akey_data));
options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
options->auth_xform.auth.key.length = 20;
}
@@ -993,39 +1014,6 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
printf("sessionless crypto: %s\n",
options->sessionless ? "enabled" : "disabled");
-#if 0
- options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
-
- /* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
- options->cipher_xform.next = NULL;
-
- options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- generate_random_key(options->ckey_data, sizeof(options->ckey_data));
-
- options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
- options->cipher_xform.cipher.key.length = 16;
-
-
- /* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
- options->auth_xform.next = NULL;
-
- options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
-
- options->auth_xform.auth.add_auth_data_length = 0;
- options->auth_xform.auth.digest_length = 20;
-
- generate_random_key(options->akey_data, sizeof(options->akey_data));
-
- options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
- options->auth_xform.auth.key.length = 20;
-#endif
}
/* Parse the argument given in the command line of the application */
@@ -1039,7 +1027,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
static struct option lgopts[] = {
{ "sessionless", no_argument, 0, 0 },
- { "cdev_type", required_argument, 0, 0 },
+ { "cdev", required_argument, 0, 0 },
{ "chain", required_argument, 0, 0 },
{ "cipher_algo", required_argument, 0, 0 },
@@ -1053,6 +1041,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "iv", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -1370,15 +1359,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
diff --git a/lib/Makefile b/lib/Makefile
index 6840f87..f254dba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index df0c0b8..e0c4fb9 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -44,7 +44,369 @@
extern "C" {
#endif
-#include <rte_crypto_sym.h>
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_crypto_sym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ /**< Undefined operation type */
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ /**< Symmetric operation */
+};
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by a crypto device */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**<
+ * Symmetric operation failed due to invalid session arguments, or if
+ * in session-less mode, failed to allocate private operation material.
+ */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API, PMDs should check the type parameter to
+ * verify that the operation is a support function of the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() .
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+ * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+
+ struct rte_mempool *mempool;
+ /**< crypto operation mempool which operation is allocated from */
+
+ phys_addr_t phys_addr;
+ /**< physical address of crypto operation */
+
+ void *opaque_data;
+ /**< Opaque pointer for user data */
+
+ union {
+ struct rte_crypto_sym_op *sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+} __rte_cache_aligned;
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->type = type;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ /* Symmetric operation structure starts after the end of the
+ * rte_crypto_op structure.
+ */
+ op->sym = (struct rte_crypto_sym_op *)(op + 1);
+ op->type = type;
+
+ __rte_crypto_sym_op_reset(op->sym);
+ break;
+ default:
+ break;
+ }
+
+ op->opaque_data = NULL;
+}
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ /**< Type of crypto operations supported by the pool. */
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Bulk allocate raw elements from mempool and return as crypto operations
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns nb_ops; returns 0 on allocation failure, or -EINVAL on operation type mismatch
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ priv = rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != type &&
+ priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+ return -EINVAL;
+
+ if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+ return nb_ops;
+
+ return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = NULL;
+ int retval;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+ if (unlikely(retval != 1))
+ return NULL;
+
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns nb_ops, with all requested operations allocated
+ * - On failure returns 0 and no operations are allocated
+ */
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+ != nb_ops))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_crypto_op_reset(ops[i], type);
+
+ return nb_ops;
+}
+
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+
+ return NULL;
+}
+
+/**
+ * Free crypto operation structure.
+ * If the operation has been allocated from a rte_mempool, then the operation
+ * will be returned to the mempool.
+ *
+ * @param op crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf which is associated with the crypto operation, the
+ * operation will be allocated in the private data of that
+ * mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /*
+ * check that the mbuf's private data size is sufficient to contain a
+ * crypto operation
+ */
+ if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op))))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym->m_src = m;
+
+ return op;
+}
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ void *priv_data;
+ uint32_t size;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return NULL;
+
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+ nb_xforms);
+}
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param sess cryptodev session
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -1;
+
+ return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index cb2b8f6..831bbf1 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -46,6 +46,8 @@
extern "C" {
#endif
+#include <string.h>
+
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -346,37 +348,24 @@ enum rte_crypto_sym_op_sess_type {
};
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
+struct rte_cryptodev_sym_session;
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
+ *
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
- * call for performing cipher, hash, or a combined hash and cipher operations.
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
+ * which contains the source data which the crypto operation is to be performed
+ * on.
*/
struct rte_crypto_sym_op {
- enum rte_crypto_sym_op_sess_type type;
- enum rte_crypto_op_status status;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ enum rte_crypto_sym_op_sess_type type;
union {
struct rte_cryptodev_sym_session *session;
@@ -387,7 +376,7 @@ struct rte_crypto_sym_op {
struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -395,7 +384,7 @@ struct rte_crypto_sym_op {
* this location.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -414,17 +403,68 @@ struct rte_crypto_sym_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The @ref aad field
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -432,180 +472,169 @@ struct rte_crypto_sym_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+ * ignored. The @ref aad field should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_sym_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+ * rte_cryptodev_sym_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint16_t length; /**< Length of additional authenticated data */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
+} __rte_cache_aligned;
/**
- * Reset the fields of a crypto operation to their default values.
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
+ memset(op, 0, sizeof(*op));
+
op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
}
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+ void *priv_data, uint8_t nb_xforms)
+{
+ struct rte_crypto_sym_xform *xform;
+
+ sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param sym_op symmetric crypto operation
+ * @param sess cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
struct rte_cryptodev_sym_session *sess)
{
- op->session = sess;
- op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+ sym_op->session = sess;
+ sym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+ return 0;
}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index c7fef6a..4632ca3 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1056,3 +1056,79 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
return NULL;
}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ unsigned elt_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ priv_size;
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->priv_size < priv_size) {
+ mp = NULL;
+ CDEV_LOG_ERR("Mempool %s already exists but with "
+ "incompatible parameters", name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ CDEV_LOG_ERR("Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 732e2b9..9baf175 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -40,16 +40,12 @@
* Defines RTE Crypto Device APIs for the provisioning of cipher and
* authentication operations.
*
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include "stddef.h"
-
#include "rte_crypto.h"
#include "rte_dev.h"
@@ -58,15 +54,18 @@ extern "C" {
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
-/**< Intel QAT SYM PMD device name */
+/**< Intel QAT Symmetric Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
+
+extern const char **rte_cryptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -214,8 +213,6 @@ struct rte_cryptodev_config {
/**
* Configure a device.
*
- * EXPERIMENTAL: this API file may change without prior notice
- *
* This function must be invoked first before any other function in the
* API. This function can also be re-invoked when a device is in the
* stopped state.
@@ -411,12 +408,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -489,66 +486,65 @@ struct rte_cryptodev_data {
extern struct rte_cryptodev *rte_cryptodevs;
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operation are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
/**
- * Enqueue a burst of packets for processing on a crypto device.
- *
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * Enqueue a burst of operations for processing on a crypto device.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -556,25 +552,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto devices queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
@@ -620,7 +616,6 @@ extern struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(uint8_t dev_id,
struct rte_crypto_sym_xform *xform);
-
/**
* Free the memory associated with a previously allocated session.
*
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index a46af6f..b682184 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -1,4 +1,4 @@
-DPDK_2.2 {
+DPDK_16.04 {
global:
rte_cryptodevs;
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
-};
+};
\ No newline at end of file
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index e39ad28..52f96c3 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
diff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile
deleted file mode 100644
index acdb449..0000000
--- a/lib/librte_mbuf_offload/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c
deleted file mode 100644
index 5c0c9dd..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
deleted file mode 100644
index 5ce6058..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operations
- * parameter's inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO_SYM
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_sym_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbug offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- (struct rte_pktmbuf_offload_pool_private *)rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success retruns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload strucutre to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO_SYM:
- __rte_crypto_sym_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_sym_xform *
-rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_sym_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
deleted file mode 100644
index 3d3b06a..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 " Fiona Trahe
@ 2016-03-04 17:38 ` Thomas Monjalon
2016-03-04 17:43 ` Trahe, Fiona
2016-03-04 17:39 ` Trahe, Fiona
2016-03-15 6:48 ` Cao, Min
2 siblings, 1 reply; 62+ messages in thread
From: Thomas Monjalon @ 2016-03-04 17:38 UTC (permalink / raw)
To: Fiona Trahe; +Cc: dev
2016-03-04 17:17, Fiona Trahe:
> This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
>
> V5:
> - updates .map file
> - removes EXPERIMENTAL label from rte_cryptodev.h
Why do you want to remove the experimental label?
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 " Fiona Trahe
2016-03-04 17:38 ` Thomas Monjalon
@ 2016-03-04 17:39 ` Trahe, Fiona
2016-03-15 6:48 ` Cao, Min
2 siblings, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-03-04 17:39 UTC (permalink / raw)
To: dev
> -----Original Message-----
> From: Trahe, Fiona
> Sent: Friday, March 04, 2016 5:18 PM
> To: dev@dpdk.org
> Cc: Doherty, Declan; Trahe, Fiona
> Subject: [PATCH v5 0/2] cryptodev API changes
>
> This patch set separates the symmetric crypto operations from generic
> operations and then modifies the cryptodev burst API to accept bursts of
> rte_crypto_op rather than rte_mbufs.
>
> V5:
> - updates .map file
> - removes EXPERIMENTAL label from rte_cryptodev.h
>
> V4:
> - Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
> - Typo fix in cached attribute on rte_crypto_op structure.
>
> V3:
> - Addresses V2 comments
> - Rebased for head
>
>
> Declan Doherty (1):
> cryptodev: change burst API to be crypto op oriented
>
> Fiona Trahe (1):
> cryptodev: API tidy and changes to support future extensions
>
>
> MAINTAINERS | 6 +-
> app/test/test_cryptodev.c | 894 +++++++++++----------
> app/test/test_cryptodev.h | 9 +-
> app/test/test_cryptodev_perf.c | 270 ++++---
> config/common_bsdapp | 8 -
> config/common_linuxapp | 8 -
> doc/api/doxy-api-index.md | 1 -
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
> drivers/crypto/qat/qat_crypto.c | 150 ++--
> drivers/crypto/qat/qat_crypto.h | 14 +-
> drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
> examples/l2fwd-crypto/main.c | 300 ++++---
> lib/Makefile | 1 -
> lib/librte_cryptodev/Makefile | 1 +
> lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
> lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
> lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
> lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
> lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
> lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
> lib/librte_mbuf/rte_mbuf.h | 6 -
> lib/librte_mbuf_offload/Makefile | 52 --
> lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
> lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
> .../rte_mbuf_offload_version.map | 7 -
> 27 files changed, 2145 insertions(+), 2027 deletions(-) create mode 100644
> lib/librte_cryptodev/rte_crypto_sym.h
> delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644
> lib/librte_mbuf_offload/rte_mbuf_offload.c
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
>
> --
> 2.1.0
NACK - email subject of patches got screwed up. I'll resend.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
2016-03-04 17:38 ` Thomas Monjalon
@ 2016-03-04 17:43 ` Trahe, Fiona
2016-03-04 17:45 ` Thomas Monjalon
0 siblings, 1 reply; 62+ messages in thread
From: Trahe, Fiona @ 2016-03-04 17:43 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: dev
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> Sent: Friday, March 04, 2016 5:39 PM
> To: Trahe, Fiona
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
>
> 2016-03-04 17:17, Fiona Trahe:
> > This patch set separates the symmetric crypto operations from generic
> operations and then modifies the cryptodev burst API to accept bursts of
> rte_crypto_op rather than rte_mbufs.
> >
> > V5:
> > - updates .map file
> > - removes EXPERIMENTAL label from rte_cryptodev.h
>
> Why do you want to remove the experimental label?
Declan had already removed from the MAINTAINERS file and other places in previous patches.
This one was just forgotten as far as we know, though as Declan is out we can't confirm with him.
Do you think we should keep the label ?
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
2016-03-04 17:43 ` Trahe, Fiona
@ 2016-03-04 17:45 ` Thomas Monjalon
2016-03-04 18:01 ` Trahe, Fiona
0 siblings, 1 reply; 62+ messages in thread
From: Thomas Monjalon @ 2016-03-04 17:45 UTC (permalink / raw)
To: Trahe, Fiona; +Cc: dev
2016-03-04 17:43, Trahe, Fiona:
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> > 2016-03-04 17:17, Fiona Trahe:
> > > This patch set separates the symmetric crypto operations from generic
> > operations and then modifies the cryptodev burst API to accept bursts of
> > rte_crypto_op rather than rte_mbufs.
> > >
> > > V5:
> > > - updates .map file
> > > - removes EXPERIMENTAL label from rte_cryptodev.h
> >
> > Why do you want to remove the experimental label?
>
> Declan had already removed from the MAINTAINERS file and other places in previous patches.
> This one was just forgotten as far as we know, though as Declan is out we can't confirm with him.
> Do you think we should keep the label ?
I cannot really decide because it's hard to understand the improvements in
a such big patch. It would be easier if the changes were split in several
steps.
Maybe the last step (last patch) would be to remove the flag. Having it in
a separate patch would give the opportunity to detail the reason of the
removal in the commit message.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
2016-03-04 17:45 ` Thomas Monjalon
@ 2016-03-04 18:01 ` Trahe, Fiona
0 siblings, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-03-04 18:01 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: dev
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> Sent: Friday, March 04, 2016 5:46 PM
> To: Trahe, Fiona
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
>
> 2016-03-04 17:43, Trahe, Fiona:
> > From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> > > 2016-03-04 17:17, Fiona Trahe:
> > > > This patch set separates the symmetric crypto operations from
> > > > generic
> > > operations and then modifies the cryptodev burst API to accept
> > > bursts of rte_crypto_op rather than rte_mbufs.
> > > >
> > > > V5:
> > > > - updates .map file
> > > > - removes EXPERIMENTAL label from rte_cryptodev.h
> > >
> > > Why do you want to remove the experimental label?
> >
> > Declan had already removed from the MAINTAINERS file and other places in
> previous patches.
> > This one was just forgotten as far as we know, though as Declan is out we
> can't confirm with him.
> > Do you think we should keep the label ?
>
> I cannot really decide because it's hard to understand the improvements in a
> such big patch. It would be easier if the changes were split in several steps.
> Maybe the last step (last patch) would be to remove the flag. Having it in a
> separate patch would give the opportunity to detail the reason of the removal in
> the commit message.
Fair enough.
I'll put the label back in.
And we'll handle later with a separate explicit commit to give people a chance to comment.
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v6 0/2] cryptodev API changes
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
` (5 preceding siblings ...)
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 2/2] This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts rte_crypto_op's rather than the current implementation which operates on rte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the use of crypto operations in general Fiona Trahe
@ 2016-03-04 18:29 ` Fiona Trahe
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
` (3 more replies)
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 1/2] cryptodev: API tidy and changes to support future extensions Fiona Trahe
` (2 subsequent siblings)
9 siblings, 4 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-04 18:29 UTC (permalink / raw)
To: dev
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
v6:
- restore EXPERIMENTAL label to cryptodev. Will handle removal in separate thread.
(email subject was incorrect in v5, so v5 hasn't arrived in patchwork, therefore v6 is in-reply-to v4 message id)
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 9 +-
config/common_linuxapp | 9 +-
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 191 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2148 insertions(+), 2026 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v6 1/2] cryptodev: API tidy and changes to support future extensions
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
` (6 preceding siblings ...)
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Fiona Trahe
@ 2016-03-04 18:29 ` Fiona Trahe
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 2/2] cryptodev: change burst API to be crypto op oriented Fiona Trahe
2016-03-15 6:57 ` [dpdk-dev] [PATCH v4 0/2] cryptodev API changes Cao, Min
9 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-04 18:29 UTC (permalink / raw)
To: dev
This patch splits symmetric-specific definitions and
functions away from the common crypto APIs to facilitate the future extension
and expansion of the cryptodev framework, in order to allow asymmetric
crypto operations to be introduced at a later date, as well as to clean up the
logical structure of the public includes. The patch also introduces the _sym
prefix to symmetric-specific structures and functions to improve clarity in
the API.
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
app/test/test_cryptodev.c | 164 +++---
app/test/test_cryptodev_perf.c | 79 +--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 44 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 6 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 51 +-
drivers/crypto/qat/qat_crypto.h | 10 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 33 +-
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 563 +------------------
lib/librte_cryptodev/rte_crypto_sym.h | 613 +++++++++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 39 +-
lib/librte_cryptodev/rte_cryptodev.h | 80 ++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 6 +-
lib/librte_mbuf_offload/rte_mbuf_offload.h | 22 +-
17 files changed, 915 insertions(+), 840 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 62f8fb0..951b443 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,13 +57,13 @@ struct crypto_testsuite_params {
};
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
struct rte_mbuf_offload *ol;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -78,7 +78,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_param);
@@ -165,7 +165,8 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -220,7 +221,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
@@ -275,7 +276,7 @@ ut_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
ts_params->conf.session_mp.nb_objs =
- (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+ (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
DEFAULT_NUM_OPS_INFLIGHT :
DEFAULT_NUM_OPS_INFLIGHT;
@@ -319,7 +320,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
ut_params->sess);
ut_params->sess = NULL;
}
@@ -464,7 +465,7 @@ test_queue_pair_descriptor_setup(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf), "Failed to configure cryptodev %u",
@@ -766,7 +767,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -776,7 +777,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -786,20 +787,21 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -862,18 +864,18 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+ TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
ut_params->ol, 2),
"failed to allocate space for crypto transforms");
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -882,7 +884,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -958,7 +960,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA1);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -967,7 +969,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -977,13 +979,14 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -991,7 +994,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1066,7 +1069,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1075,7 +1078,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1085,13 +1088,14 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1099,7 +1103,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1168,7 +1172,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA256);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1177,7 +1181,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1187,13 +1191,14 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1201,7 +1206,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1282,7 +1287,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1291,7 +1296,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1301,15 +1306,16 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1317,7 +1323,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1371,7 +1377,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
@@ -1386,8 +1392,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
"Failed to create session params");
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
@@ -1400,7 +1407,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
{
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1409,7 +1416,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1422,7 +1429,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params)
{
@@ -1442,7 +1449,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1450,7 +1457,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1522,7 +1529,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1531,7 +1538,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1541,13 +1548,14 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1555,7 +1563,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)
rte_pktmbuf_prepend(ut_params->ibuf,
@@ -1615,7 +1623,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_AES_XCBC);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1624,7 +1632,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1634,13 +1642,14 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1648,7 +1657,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
@@ -1752,7 +1761,7 @@ test_multi_session(void)
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_cryptodev_info dev_info;
- struct rte_cryptodev_session **sessions;
+ struct rte_cryptodev_sym_session **sessions;
uint16_t i;
@@ -1761,12 +1770,13 @@ test_multi_session(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
- dev_info.max_nb_sessions) + 1, 0);
+ sessions = rte_malloc(NULL,
+ (sizeof(struct rte_cryptodev_sym_session *) *
+ dev_info.sym.max_nb_sessions) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.max_nb_sessions; i++) {
- sessions[i] = rte_cryptodev_session_create(
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(sessions[i],
@@ -1781,13 +1791,13 @@ test_multi_session(void)
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.max_nb_sessions; i++)
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
sessions[i]);
rte_free(sessions);
@@ -1806,7 +1816,7 @@ test_not_in_place_crypto(void)
/* Create multiple crypto sessions*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0], &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1828,7 +1838,7 @@ test_not_in_place_crypto(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1836,7 +1846,7 @@ test_not_in_place_crypto(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1962,7 +1972,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static struct test_command cryptodev_qat_cmd = {
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 728bcf0..42dd9bc 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -63,12 +63,12 @@ struct crypto_testsuite_params {
#define MAX_NUM_OF_OPS_PER_UT (128)
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
@@ -127,7 +127,7 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -179,7 +179,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
@@ -252,7 +252,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess)
- rte_cryptodev_session_free(ts_params->dev_id,
+ rte_cryptodev_sym_session_free(ts_params->dev_id,
ut_params->sess);
/* free crypto operation structure */
@@ -1713,7 +1713,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1723,7 +1723,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1733,7 +1733,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1741,7 +1741,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < num_to_submit ; b++) {
tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ (const char *)data_params[0].expected.ciphertext,
data_params[0].length, 0);
TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
@@ -1753,12 +1753,12 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
DIGEST_BYTE_LENGTH_SHA256);
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+ ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
@@ -1814,7 +1814,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1824,10 +1825,13 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
total_cycles += end_cycles - start_cycles;
}
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1876,7 +1880,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1885,7 +1889,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1895,7 +1899,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1928,12 +1932,12 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1957,15 +1961,19 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, tx_mbufs,
+ ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
+ < DEFAULT_BURST_SIZE) ?
+ DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
+ DEFAULT_BURST_SIZE);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1973,10 +1981,13 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num, 0,
rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1987,7 +1998,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
mmps = (double)num_received*mhz/(end_cycles - start_cycles);
throughput = mmps*data_params[index].length*8;
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
+ data_params[index].length, num_sent, num_received);
printf("\t%.2f\t%u", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
@@ -2044,7 +2055,7 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index c7d884c..61d93cd 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -109,7 +109,7 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
@@ -119,12 +119,12 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
@@ -134,11 +134,11 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
@@ -196,11 +196,11 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
@@ -259,10 +259,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,11 +296,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
@@ -313,7 +313,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
return NULL;
sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
sess, crypto_op->xform) != 0)) {
@@ -339,7 +339,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
@@ -432,14 +433,14 @@ static struct rte_mbuf *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -459,7 +460,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
@@ -511,7 +512,8 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
qp->qp_stats.enqueue_err_count++;
goto flush_jobs;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index e43b898..3cd9990 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -108,7 +108,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->dev_type = dev->dev_type;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->max_nb_sessions = internals->max_nb_sessions;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
}
}
@@ -243,7 +243,7 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
/** Configure a aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform, void *sess)
{
struct aesni_mb_private *internals = dev->data->dev_private;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..ab70c15 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -218,7 +218,7 @@ struct aesni_mb_session {
extern int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform);
+ const struct rte_crypto_sym_xform *xform);
/** device specific operations function pointer structure */
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 828756b..e7b9027 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,37 +89,37 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
}
static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
if (xform->next == NULL)
return -1;
/* Cipher Only */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
/* Authentication Only */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
/* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
/* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
return -1;
}
static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return &xform->auth;
xform = xform->next;
@@ -129,10 +129,10 @@ qat_get_auth_xform(struct rte_crypto_xform *xform)
}
static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return &xform->cipher;
xform = xform->next;
@@ -144,7 +144,7 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform)
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -261,7 +261,7 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
auth_xform->digest_length))
goto error_out;
- return (struct rte_cryptodev_session *)session;
+ return (struct rte_crypto_sym_session *)session;
error_out:
rte_mempool_put(internals->sess_mp, session);
@@ -275,7 +275,7 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
register struct qat_queue *queue;
@@ -327,7 +327,8 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct rte_mbuf_offload *ol;
struct qat_queue *queue;
@@ -343,12 +344,13 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_pkts) {
rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+ ol = rte_pktmbuf_offload_get(rx_mbuf,
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ ol->op.crypto.status =
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
@@ -384,20 +386,21 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
"to (%p) mbuf.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests mbuf (%p) is sessionless.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(ol->op.crypto.session->type
+ != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
@@ -520,8 +523,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
- info->max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..e9f71fe 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,16 +109,18 @@ qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
extern void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform, void *session_private);
extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..85700fc 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,11 +89,11 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->pci_dev->addr.devid,
cryptodev->pci_dev->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index a950b74..93de786 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,13 +124,13 @@ struct l2fwd_crypto_options {
enum l2fwd_crypto_xform_chain xform_chain;
- struct rte_crypto_xform cipher_xform;
+ struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_key iv_key;
+ struct rte_crypto_sym_key iv_key;
uint8_t ivkey_data[16];
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform auth_xform;
uint8_t akey_data[128];
};
@@ -142,8 +142,11 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct rte_crypto_key iv_key;
- struct rte_cryptodev_session *session;
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv;
+ struct rte_cryptodev_sym_session *session;
};
/** lcore configuration */
@@ -372,7 +375,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
/* Append space for digest to end of packet */
ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -474,11 +477,11 @@ generate_random_key(uint8_t *key, unsigned length)
key[i] = rand() % 0xff;
}
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
uint8_t cdev_id)
{
- struct rte_crypto_xform *first_xform;
+ struct rte_crypto_sym_xform *first_xform;
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
@@ -489,7 +492,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options,
}
/* Setup Cipher Parameters */
- return rte_cryptodev_session_create(cdev_id, first_xform);
+ return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}
static void
@@ -610,7 +613,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
m = pkts_burst[j];
ol = rte_pktmbuf_offload_alloc(
l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
/*
* If we can't allocate a offload, then drop
* the rest of the burst and dequeue and
@@ -689,7 +692,7 @@ parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg)
*type = RTE_CRYPTODEV_AESNI_MB_PMD;
return 0;
} else if (strcmp("QAT", optarg) == 0) {
- *type = RTE_CRYPTODEV_QAT_PMD;
+ *type = RTE_CRYPTODEV_QAT_SYM_PMD;
return 0;
}
@@ -745,7 +748,7 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_key *key __rte_unused,
+parse_key(struct rte_crypto_sym_key *key __rte_unused,
unsigned length __rte_unused, char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
@@ -982,7 +985,7 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
switch (options->cdev_type) {
case RTE_CRYPTODEV_AESNI_MB_PMD:
printf("cryptodev type: AES-NI MB PMD\n"); break;
- case RTE_CRYPTODEV_QAT_PMD:
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
printf("cryptodev type: QAT PMD\n"); break;
default:
break;
@@ -1179,7 +1182,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports)
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
int retval;
- if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+ if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
if (rte_cryptodev_count() < nb_ports)
return -1;
} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 81fa3fc..0d59229 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -45,6 +45,7 @@ SRCS-y += rte_cryptodev.c
# export include files
SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..df0c0b8 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,572 +36,15 @@
/**
* @file rte_crypto.h
*
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
*
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
- RTE_CRYPTO_CIPHER_NULL = 1,
- /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
- RTE_CRYPTO_CIPHER_3DES_CBC,
- /**< Triple DES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_3DES_CTR,
- /**< Triple DES algorithm in CTR mode */
- RTE_CRYPTO_CIPHER_3DES_ECB,
- /**< Triple DES algorithm in ECB mode */
-
- RTE_CRYPTO_CIPHER_AES_CBC,
- /**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
- RTE_CRYPTO_CIPHER_AES_CTR,
- /**< AES algorithm in Counter mode */
- RTE_CRYPTO_CIPHER_AES_ECB,
- /**< AES algorithm in ECB mode */
- RTE_CRYPTO_CIPHER_AES_F8,
- /**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* element of the
- * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_setup_data* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation.
- */
- RTE_CRYPTO_CIPHER_AES_XTS,
- /**< AES algorithm in XTS mode */
-
- RTE_CRYPTO_CIPHER_ARC4,
- /**< (A)RC4 cipher algorithm */
-
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- /**< Kasumi algorithm in F8 mode */
-
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- /**< SNOW3G algorithm in UEA2 mode */
-
- RTE_CRYPTO_CIPHER_ZUC_EEA3
- /**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- /**< Encrypt cipher operation */
- RTE_CRYPTO_CIPHER_OP_DECRYPT
- /**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- * use to create a session.
- */
-struct rte_crypto_cipher_xform {
- enum rte_crypto_cipher_operation op;
- /**< This parameter determines if the cipher operation is an encrypt or
- * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
- * only encrypt operations are valid.
- */
- enum rte_crypto_cipher_algorithm algo;
- /**< Cipher algorithm */
-
- struct rte_crypto_key key;
- /**< Cipher key
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
- * point to a concatenation of the AES encryption key followed by a
- * keymask. As per RFC3711, the keymask should be padded with trailing
- * bytes to match the length of the encryption key used.
- *
- * For AES-XTS mode of operation, two keys must be provided and
- * key.data must point to the two keys concatenated together (Key1 ||
- * Key2). The cipher key length will contain the total size of both
- * keys.
- *
- * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
- * 192 bits (24 bytes) or 256 bits (32 bytes).
- *
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
- * should be set to the combined length of the encryption key and the
- * keymask. Since the keymask and the encryption key are the same size,
- * key.length should be set to 2 x the AES encryption key length.
- *
- * For the AES-XTS mode of operation:
- * - Two keys must be provided and key.length refers to total length of
- * the two keys.
- * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size.
- **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
- RTE_CRYPTO_AUTH_NULL = 1,
- /**< NULL hash algorithm. */
-
- RTE_CRYPTO_AUTH_AES_CBC_MAC,
- /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_CMAC,
- /**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- /**< AES XCBC algorithm. */
-
- RTE_CRYPTO_AUTH_KASUMI_F9,
- /**< Kasumi algorithm in F9 mode. */
-
- RTE_CRYPTO_AUTH_MD5,
- /**< MD5 algorithm */
- RTE_CRYPTO_AUTH_MD5_HMAC,
- /**< HMAC using MD5 algorithm */
-
- RTE_CRYPTO_AUTH_SHA1,
- /**< 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA1_HMAC,
- /**< HMAC using 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224,
- /**< 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224_HMAC,
- /**< HMAC using 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256,
- /**< 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256_HMAC,
- /**< HMAC using 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384,
- /**< 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384_HMAC,
- /**< HMAC using 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512,
- /**< 512 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512_HMAC,
- /**< HMAC using 512 bit SHA algorithm. */
-
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- /**< SNOW3G algorithm in UIA2 mode. */
-
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- /**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
- RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
- RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
- enum rte_crypto_auth_operation op;
- /**< Authentication operation type */
- enum rte_crypto_auth_algorithm algo;
- /**< Authentication algorithm selection */
-
- struct rte_crypto_key key; /**< Authentication key data.
- * The authentication key length MUST be less than or equal to the
- * block size of the algorithm. It is the callers responsibility to
- * ensure that the key length is compliant with the standard being used
- * (for example RFC 2104, FIPS 198a).
- */
-
- uint32_t digest_length;
- /**< Length of the digest to be returned. If the verify option is set,
- * this specifies the length of the digest to be compared for the
- * session.
- *
- * If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_session_create* or by the
- * *rte_cryptodev_enqueue_burst* if using session-less APIs.
- */
-
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in the message length to hash field of
- * the rte_crypto_op_data structure.
- */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
- RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
- RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
- struct rte_crypto_xform *next; /**< next xform in chain */
-
- enum rte_crypto_xform_type type; /**< xform type */
- union {
- struct rte_crypto_auth_xform auth;
- /**< Authentication / hash xform */
- struct rte_crypto_cipher_xform cipher;
- /**< Cipher xform */
- };
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
- RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
-
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
- enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
-
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
-
- union {
- struct rte_cryptodev_session *session;
- /**< Handle for the initialised session context */
- struct rte_crypto_xform *xform;
- /**< Session-less API crypto operation parameters */
- };
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- */
- } to_cipher; /**< Data offsets and length for ciphering */
-
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field specifies the start
- * of the AAD data in the source buffer.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field specifies the length of
- * the AAD data in the source buffer.
- */
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
-
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param op The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
- op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
- struct rte_cryptodev_session *sess)
-{
- op->session = sess;
- op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 0000000..cb2b8f6
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,613 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CCM,
+ /**< AES algorithm in CCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation
+ */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_GCM,
+ /**< AES algorithm in GCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< Kasumi algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ /**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * use to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * For AES-XTS mode of operation, two keys must be provided and
+ * key.data must point to the two keys concatenated together (Key1 ||
+ * Key2). The cipher key length will contain the total size of both
+ * keys.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the CCM mode of operation, the only supported key length is 128
+ * bits (16 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ **/
+};
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CCM,
+ /**< AES algorithm in CCM mode. This is an authenticated cipher. When
+ * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+ * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+ * set up the related rte_crypto_cipher_xform structure in the
+ * session context or the corresponding parameter in the crypto
+ * operation data structures op_params parameter MUST be set for a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GCM,
+ /**< AES algorithm in GCM mode. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< Kasumi algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transforms. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the callers responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ uint32_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated. If the value is greater than the
+ * maximum length allowed by the hash then an error will be generated
+ * by *rte_cryptodev_sym_session_create* or by the
+ * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ */
+
+ uint32_t add_auth_data_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * The maximum permitted value is 240 bytes, unless otherwise specified
+ * below.
+ *
+ * This field must be specified when the hash algorithm is one of the
+ * following:
+ *
+ * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+ * length of the IV (which should be 16).
+ *
+ * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+ * the length of the Additional Authenticated Data (called A, in NIST
+ * SP800-38D).
+ *
+ * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+ * the length of the associated data (called A, in NIST SP800-38C).
+ * Note that this does NOT include the length of any padding, or the
+ * 18 bytes reserved at the start of the above field to store the
+ * block B0 and the encoded length. The maximum permitted value in
+ * this case is 222 bytes.
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+ * this field is not used and should be set to 0. Instead the length
+ * of the AAD data is specified in the message length to hash field of
+ * the rte_crypto_sym_op structure.
+ */
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required, multiple transforms
+ * can be chained together to specify a chain transforms such as authentication
+ * then cipher, or cipher then authentication. Each transform structure can
+ * hold a single transform, the type field is used to specify which transform
+ * is contained within the union
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type;
+ /**< xform type */
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ };
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+ RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
+ /**< Operation not yet submitted to a cryptodev */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
+ * call for performing cipher, hash, or a combined hash and cipher operations.
+ */
+struct rte_crypto_sym_op {
+ enum rte_crypto_sym_op_sess_type type;
+ enum rte_crypto_op_status status;
+
+ struct {
+ struct rte_mbuf *m; /**< Destination mbuf */
+ uint8_t offset; /**< Data offset */
+ } dst;
+
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing, specified
+ * as number of bytes from start of data in the source
+ * buffer. The result of the cipher operation will be
+ * written back into the output buffer starting at
+ * this location.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ * if a block cipher is being used. This is also the
+ * same as the result length.
+ *
+ * @note
+ * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+ * this value should not include the length of the
+ * padding or the length of the MAC; the driver will
+ * compute the actual number of bytes over which the
+ * encryption will occur, which will include these
+ * values.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+ * field should be set to 0.
+ */
+ } to_cipher; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The field @ref additional_auth field
+ * should be set instead.
+ *
+ * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+ * mode of operation, this field specifies the start
+ * of the AAD data in the source buffer.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The field @ref additional_auth field
+ * should be set instead.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+ * of operation, this field specifies the length of
+ * the AAD data in the source buffer.
+ */
+ } to_hash; /**< Data offsets and length for authentication */
+ } data; /**< Details of data to be operated on */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+ * Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length is 96
+ * bits) or J0 (for other sizes), where J0 is as defined by
+ * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+ * needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the nonce
+ * should be written starting at &iv[1] (to allow space for the
+ * implementation to write in the flags in the first byte).
+ * Note that a full 16 bytes should be allocated, even though
+ * the length field will have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+ * 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD be
+ * 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ size_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the length of the
+ * IV (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length of the
+ * counter (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+ * which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce, which can
+ * be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result should be
+ * inserted (in the case of digest generation) or where the
+ * purported digest exists (in the case of digest
+ * verification).
+ *
+ * At session creation time, the client specified the digest
+ * result length with the digest_length member of the @ref
+ * rte_crypto_auth_xform structure. For physical crypto
+ * devices the caller must allocate at least digest_length of
+ * physically contiguous memory at this location.
+ *
+ * For digest generation, the digest result will overwrite
+ * any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is understood
+ * to be in the destination buffer for digest generation, and
+ * in the source buffer for digest verification. The location
+ * of the digest result in this case is immediately following
+ * the region over which the digest is computed.
+ */
+ phys_addr_t phys_addr; /**< Physical address of digest */
+ uint32_t length; /**< Length of digest */
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD) needed for
+ * authenticated cipher mechanisms (CCM and GCM), and to the IV
+ * for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is set up
+ * for the session in the @ref rte_crypto_auth_xform structure
+ * as part of the @ref rte_cryptodev_sym_session_create function
+ * call. This length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+ * caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset of one
+ * byte into the array, leaving room for the implementation
+ * to write in the flags to the first byte.
+ *
+ * - the additional authentication data itself should be
+ * written starting at an offset of 18 bytes into the array,
+ * leaving room for the length encoding in the first two
+ * bytes of the second block.
+ *
+ * - the array should be big enough to hold the above fields,
+ * plus any padding to round this up to the nearest multiple
+ * of the block size (16 bytes). Padding will be added by
+ * the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set to 0.
+ * Instead the AAD data should be placed in the source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint32_t length; /**< Length of additional authenticated data */
+ } additional_auth;
+ /**< Additional authentication parameters */
+
+ struct rte_mempool *pool;
+ /**< mempool used to allocate crypto op */
+
+ void *user_data;
+ /**< opaque pointer for user data */
+};
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+ op->dst.m = NULL;
+ op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ op->session = sess;
+ op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 2838852..c7fef6a 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -572,8 +572,8 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
@@ -604,8 +604,10 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
- config->session_mp.cache_size, config->socket_id);
+ return rte_cryptodev_sym_session_pool_create(dev,
+ config->session_mp.nb_objs,
+ config->session_mp.cache_size,
+ config->socket_id);
}
@@ -911,12 +913,12 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
void *opaque_arg,
void *_sess,
__rte_unused unsigned i)
{
- struct rte_cryptodev_session *sess = _sess;
+ struct rte_cryptodev_sym_session *sess = _sess;
struct rte_cryptodev *dev = opaque_arg;
memset(sess, 0, mp->elt_size);
@@ -930,8 +932,8 @@ rte_crypto_session_init(struct rte_mempool *mp,
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned priv_sess_size;
@@ -951,7 +953,7 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return -ENOMEM;
}
- unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+ unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
priv_sess_size;
dev->data->session_pool = rte_mempool_lookup(mp_name);
@@ -975,7 +977,8 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
0, /* private data size */
NULL, /* obj initialization constructor */
NULL, /* obj initialization constructor arg */
- rte_crypto_session_init, /* obj constructor */
+ rte_cryptodev_sym_session_init,
+ /**< obj constructor*/
dev, /* obj constructor arg */
socket_id, /* socket id */
0); /* flags */
@@ -990,11 +993,12 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return 0;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
void *_sess;
if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
@@ -1010,7 +1014,7 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return NULL;
}
- sess = (struct rte_cryptodev_session *)_sess;
+ sess = (struct rte_cryptodev_sym_session *)_sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1026,8 +1030,9 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return sess;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..732e2b9 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,14 +57,14 @@ extern "C" {
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
+/**< Intel QAT SYM PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
};
/* Logging Macros */
@@ -99,8 +99,11 @@ struct rte_cryptodev_info {
unsigned max_nb_queue_pairs;
/**< Maximum number of queues pairs supported by device. */
- unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device. */
+ } sym;
};
#define RTE_CRYPTODEV_DETACHED (0)
@@ -130,17 +133,6 @@ struct rte_cryptodev_qp_conf {
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
enum rte_cryptodev_event_type event, void *cb_arg);
-#ifdef RTE_CRYPTODEV_PERF
-/**
- * Crypto Device performance counter statistics structure. This structure is
- * used for RDTSC counters for measuring crypto operations.
- */
-struct rte_cryptodev_perf_stats {
- uint64_t t_accumlated; /**< Accumulated time processing operation */
- uint64_t t_min; /**< Max time */
- uint64_t t_max; /**< Min time */
-};
-#endif
/** Crypto Device statistics */
struct rte_cryptodev_stats {
@@ -153,29 +145,8 @@ struct rte_cryptodev_stats {
/**< Total error count on operations enqueued */
uint64_t dequeue_err_count;
/**< Total error count on operations dequeued */
+};
-#ifdef RTE_CRYPTODEV_DETAILED_STATS
- struct {
- uint64_t encrypt_ops; /**< Count of encrypt operations */
- uint64_t encrypt_bytes; /**< Number of bytes encrypted */
-
- uint64_t decrypt_ops; /**< Count of decrypt operations */
- uint64_t decrypt_bytes; /**< Number of bytes decrypted */
- } cipher; /**< Cipher operations stats */
-
- struct {
- uint64_t generate_ops; /**< Count of generate operations */
- uint64_t bytes_hashed; /**< Number of bytes hashed */
-
- uint64_t verify_ops; /**< Count of verify operations */
- uint64_t bytes_verified;/**< Number of bytes verified */
- } hash; /**< Hash operations stats */
-#endif
-
-#ifdef RTE_CRYPTODEV_PERF
- struct rte_cryptodev_perf_stats op_perf; /**< Operations stats */
-#endif
-} __rte_cache_aligned;
/**
* Create a virtual crypto device
@@ -607,6 +578,23 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
}
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+ struct {
+ uint8_t dev_id;
+ /**< Device Id */
+ enum rte_cryptodev_type type;
+ /** Crypto Device type session created on */
+ struct rte_mempool *mp;
+ /**< Mempool session allocated from */
+ } __rte_aligned(8);
+ /**< Public symmetric session details */
+
+ char _private[0];
+ /**< Private session material */
+};
+
+
/**
* Initialise a session for symmetric cryptographic operations.
*
@@ -628,9 +616,9 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
* @return
* Pointer to the created session or NULL
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
- struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform);
/**
@@ -638,15 +626,15 @@ rte_cryptodev_session_create(uint8_t dev_id,
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
- * *rte_cryptodev_session_create*.
+ * *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
- struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *session);
#ifdef __cplusplus
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 8270afa..7d049ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,11 +56,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-struct rte_cryptodev_stats;
-struct rte_cryptodev_info;
-struct rte_cryptodev_qp_conf;
-
-enum rte_cryptodev_event_type;
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define RTE_PMD_DEBUG_TRACE(...) \
@@ -80,7 +75,6 @@ struct rte_cryptodev_session {
};
struct rte_cryptodev_driver;
-struct rte_cryptodev;
/**
* Initialisation function of a crypto driver invoked for each matching
@@ -364,7 +358,7 @@ typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
* - On success returns a pointer to a rte_mempool
* - On failure returns a NULL pointer
*/
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
struct rte_cryptodev *dev, unsigned nb_objs,
unsigned obj_cache_size, int socket_id);
@@ -378,7 +372,7 @@ typedef int (*cryptodev_create_session_pool_t)(
* - On success returns the size of the session structure for device
* - On failure returns 0
*/
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
@@ -392,7 +386,7 @@ typedef unsigned (*cryptodev_get_session_private_size_t)(
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
void *session_private);
/**
@@ -406,14 +400,14 @@ typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
/**
* Free Crypto session.
* @param session Cryptodev session structure to free
*/
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
@@ -427,9 +421,9 @@ struct rte_cryptodev_ops {
cryptodev_info_get_t dev_infos_get; /**< Get device info. */
cryptodev_stats_get_t stats_get;
- /**< Get generic device statistics. */
+ /**< Get device statistics. */
cryptodev_stats_reset_t stats_reset;
- /**< Reset generic device statistics. */
+ /**< Reset device statistics. */
cryptodev_queue_pair_setup_t queue_pair_setup;
/**< Set up a device queue pair. */
@@ -442,13 +436,13 @@ struct rte_cryptodev_ops {
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return private session. */
- cryptodev_initialize_session_t session_initialize;
+ cryptodev_sym_initialize_session_t session_initialize;
/**< Initialization function for private session data */
- cryptodev_configure_session_t session_configure;
+ cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
- cryptodev_free_session_t session_clear;
+ cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
};
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..a46af6f 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -16,8 +16,8 @@ DPDK_2.2 {
rte_cryptodev_pmd_driver_register;
rte_cryptodev_pmd_release_device;
rte_cryptodev_pmd_virtual_dev_init;
- rte_cryptodev_session_create;
- rte_cryptodev_session_free;
+ rte_cryptodev_sym_session_create;
+ rte_cryptodev_sym_session_free;
rte_cryptodev_socket_id;
rte_cryptodev_start;
rte_cryptodev_stats_get;
@@ -29,4 +29,4 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_stop;
local: *;
-};
\ No newline at end of file
+};
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
index 77993b6..5ce6058 100644
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,7 +67,7 @@ extern "C" {
enum rte_mbuf_ol_op_type {
RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
/**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
+ RTE_PKTMBUF_OL_CRYPTO_SYM
/**< Crypto offload operation */
};
@@ -84,7 +84,7 @@ struct rte_mbuf_offload {
enum rte_mbuf_ol_op_type type; /**< offload type */
union {
- struct rte_crypto_op crypto; /**< Crypto operation */
+ struct rte_crypto_sym_op crypto; /**< Crypto operation */
} op;
};
@@ -194,8 +194,8 @@ __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
ol->type = type;
switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
+ case RTE_PKTMBUF_OL_CRYPTO_SYM:
+ __rte_crypto_sym_op_reset(&ol->op.crypto); break;
default:
break;
}
@@ -278,24 +278,24 @@ __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
* - On success returns pointer to first crypto xform in crypto operations chain
* - On failure returns NULL
*/
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
unsigned nb_xforms)
{
- struct rte_crypto_xform *xform;
+ struct rte_crypto_sym_xform *xform;
void *priv_data;
uint16_t size;
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
if (priv_data == NULL)
return NULL;
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+ ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
} while (xform);
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v6 2/2] cryptodev: change burst API to be crypto op oriented
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
` (7 preceding siblings ...)
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 1/2] cryptodev: API tidy and changes to support future extensions Fiona Trahe
@ 2016-03-04 18:29 ` Fiona Trahe
2016-03-15 6:57 ` [dpdk-dev] [PATCH v4 0/2] cryptodev API changes Cao, Min
9 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-04 18:29 UTC (permalink / raw)
To: dev
This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
of rte_crypto_op's rather than the current implementation which operates on
rte_mbuf bursts; this simplifies the burst processing in the crypto PMDs and the
use of crypto operations in general.
The changes also continue the separation of the symmetric operation parameters
from the more general operation parameters; this will simplify the integration
of asymmetric crypto operations in the future.
As well as the changes to the crypto APIs this patch adds functions for managing
rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
tests and sample application to work with the modified APIs and finally
removes the now unused rte_mbuf_offload library.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 804 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 253 +++----
config/common_bsdapp | 9 +-
config/common_linuxapp | 9 +-
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 171 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 2 +-
drivers/crypto/qat/qat_crypto.c | 123 ++--
drivers/crypto/qat/qat_crypto.h | 12 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 4 +-
examples/l2fwd-crypto/main.c | 283 ++++----
lib/Makefile | 1 -
lib/librte_cryptodev/rte_crypto.h | 364 +++++++++-
lib/librte_cryptodev/rte_crypto_sym.h | 379 +++++-----
lib/librte_cryptodev/rte_cryptodev.c | 76 ++
lib/librte_cryptodev/rte_cryptodev.h | 115 ++-
lib/librte_cryptodev/rte_cryptodev_version.map | 5 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 --------
.../rte_mbuf_offload_version.map | 7 -
25 files changed, 1579 insertions(+), 1532 deletions(-)
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index 628bc05..8d84dda 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -222,10 +222,6 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 951b443..208fc14 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -48,7 +47,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,8 +61,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_mbuf_offload *ol;
- struct rte_crypto_sym_op *op;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -104,7 +102,7 @@ setup_test_string(struct rte_mempool *mpool,
return m;
}
-#if HEX_DUMP
+#ifdef HEX_DUMP
static void
hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
{
@@ -112,27 +110,29 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
-#if HEX_DUMP
+#ifdef HEX_DUMP
hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
-#if HEX_DUMP
+#ifdef HEX_DUMP
if (obuf)
hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -162,13 +162,14 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -253,10 +254,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -326,8 +326,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -680,76 +680,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -776,7 +777,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -786,59 +786,65 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ /* Create crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ /* Set crypto operation authentication parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ /* Set crypto operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -863,60 +869,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -985,42 +997,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* attach symmetric crypto session to crypto operations */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1088,47 +1106,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1197,42 +1219,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1244,7 +1271,6 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
return TEST_SUCCESS;
}
@@ -1312,43 +1338,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1367,7 +1396,6 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
DIGEST_BYTE_LENGTH_SHA512,
"Generated digest data not as expected");
-
return TEST_SUCCESS;
}
@@ -1424,6 +1452,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->auth_xform.auth.key.data = hmac_sha512_key;
ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
+
return TEST_SUCCESS;
}
@@ -1448,43 +1477,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1497,8 +1529,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
-
return TEST_SUCCESS;
}
@@ -1524,10 +1554,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
catch_22_quote, QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
@@ -1548,56 +1574,75 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Set operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ /* Set operation authentication parameters */
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->m_src,
+ CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ rte_pktmbuf_mtod_offset(
+ ut_params->op->sym->m_src, uint8_t *,
+ CIPHER_IV_LENGTH_AES_CBC +
+ QUOTE_512_BYTES),
catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC,
+ gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
+ TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC :
+ DIGEST_BYTE_LENGTH_AES_XCBC,
"Generated digest data not as expected");
return TEST_SUCCESS;
@@ -1614,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC);
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
@@ -1648,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf, QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ rte_memcpy(sym_op->auth.digest.data,
+ catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
+ DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1837,50 +1894,53 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+ sym_op->m_dst = dst_m;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
-
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 034393e..c84ba42 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -32,8 +32,6 @@
#ifndef TEST_CRYPTODEV_H_
#define TEST_CRYPTODEV_H_
-#define HEX_DUMP 0
-
#define FALSE 0
#define TRUE 1
@@ -47,8 +45,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
@@ -62,7 +61,9 @@
#define AES_XCBC_MAC_KEY_SZ (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 (12)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224 (14)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC (12)
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 42dd9bc..b3f4fd9 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -50,7 +49,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -68,8 +67,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_op *op;
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +77,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +90,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
@@ -113,23 +111,24 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -256,8 +255,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1698,11 +1697,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1739,46 +1739,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[0].length;
+
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +1793,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1814,9 +1818,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1824,15 +1827,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1846,16 +1849,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1865,11 +1861,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+ double throughput, mmps;
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1908,7 +1907,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+ "Mrps\tThoughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1918,63 +1917,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+ TEST_ASSERT_NOT_NULL(ut_params->digest
+ , "no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+ struct rte_crypto_op *op = rte_crypto_op_alloc(
+ ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[index].length;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)
+ rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num,
- 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
- < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
- DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -1982,37 +1988,34 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym->m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
diff --git a/config/common_bsdapp b/config/common_bsdapp
index 7df5ac6..54255f8 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -301,7 +301,7 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
#
# Compile generic crypto device library
-# EXPERIMENTAL: API may change without prior notice
+# EXPERIMENTAL: API may change without prior notice.
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
@@ -352,13 +352,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 26df137..654e0bd 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -315,7 +315,7 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
#
# Compile generic crypto device library
-# EXPERIMENTAL: API may change without prior notice
+# EXPERIMENTAL: API may change without prior notice.
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
@@ -368,13 +368,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 7a91001..f626386 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 61d93cd..5b26444 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -37,7 +37,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
@@ -296,16 +295,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
void *_sess = NULL;
@@ -316,7 +315,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0)) {
+ sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -338,12 +337,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_sym_op *c_op,
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -372,49 +373,65 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL)
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ sizeof(get_digest_byte_length(job->hash_alg)));
+
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library currently only supports returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
@@ -429,43 +446,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_sym_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
}
- return m;
+ return op;
}
/**
@@ -481,16 +496,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_pkts, (void *)op);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -499,53 +514,53 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
JOB_AES_HMAC *job = NULL;
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i],
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ MB_LOG_ERR("PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
-
- sess = get_session(qp, &ol->op.crypto);
+#endif
+ sess = get_session(_qp, ops[i]);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(_qp, ops[i], sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
/* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ job = (*_qp->ops->job.submit)(&_qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(_qp, job);
}
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ _qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -553,24 +568,24 @@ flush_jobs:
* If we haven't processed any jobs in submit loop, then flush jobs
* queue to stop the output stalling
*/
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
+ job = (*_qp->ops->job.flush_job)(&_qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ _qp->stats.enqueued_count += handle_completed_jobs(_qp, job);
return i;
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
unsigned nb_dequeued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ nb_dequeued = rte_ring_dequeue_burst(_qp->processed_pkts,
+ (void **)ops, nb_ops);
+ _qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3cd9990..d56de12 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index ab70c15..0aed177 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e7b9027..38dc956 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
@@ -72,7 +71,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +274,16 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,19 +294,17 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights;
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
-
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ while (nb_pkts_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
if (nb_pkts_sent == 0)
@@ -316,7 +314,7 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
nb_pkts_sent++;
- cur_tx_pkt++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -327,14 +325,13 @@ kick_tail:
}
uint16_t
-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -342,17 +339,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -361,9 +361,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -377,38 +376,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
- struct rte_mbuf_offload *ol;
-
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
return -EINVAL;
}
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+#endif
+ if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type
- != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
@@ -416,37 +413,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
* - always in place.
*/
qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
-
+ rte_pktmbuf_mtophys(op->sym->m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym->cipher.data.length;
+ cipher_param->cipher_offset = op->sym->cipher.data.offset;
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym->auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->auth_len = op->sym->auth.data.length;
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -457,9 +454,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
#endif
return 0;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index e9f71fe..9323383 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,12 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 85700fc..5e51aca 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 93de786..6d5aeeb 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -85,6 +84,7 @@
*/
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
+
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -112,6 +117,12 @@ enum l2fwd_crypto_xform_chain {
L2FWD_CRYPTO_HASH_CIPHER
};
+struct l2fwd_key {
+ uint8_t *data;
+ uint32_t length;
+ phys_addr_t phys_addr;
+};
+
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -127,7 +138,7 @@ struct l2fwd_crypto_options {
struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_sym_key iv_key;
+ struct l2fwd_key iv_key;
uint8_t ivkey_data[16];
struct rte_crypto_sym_xform auth_xform;
@@ -142,10 +153,7 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct {
- uint8_t *data;
- uint16_t length;
- } iv;
+ struct l2fwd_key iv_key;
struct rte_cryptodev_sym_session *session;
};
@@ -157,14 +165,16 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
@@ -178,7 +188,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -203,7 +213,7 @@ struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
-#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */
/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
@@ -292,20 +302,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -313,7 +324,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -321,23 +333,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -375,43 +387,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_op_attach_sym_session(op, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym->auth.digest.length = cparams->digest_length;
+
+ op->sym->auth.data.offset = ipdata_offset;
+ op->sym->auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym->cipher.iv.data = cparams->iv_key.data;
+ op->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym->cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym->cipher.data.offset = ipdata_offset;
+ op->sym->cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym->m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -433,8 +445,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -443,7 +455,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -503,6 +515,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -563,12 +577,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -597,8 +611,6 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
-
portid = qconf->rx_port_list[i];
cparams = &port_cparams[i];
@@ -608,44 +620,49 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
port_statistics[portid].rx += nb_rx;
- /* Enqueue packets from Crypto device*/
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ if (nb_rx) {
/*
- * If we can't allocate a offload, then drop
+ * If we can't allocate enough crypto_ops, then drop
- * the rest of the burst and dequeue and
- * process the packets to free offload structs
+ * the whole burst, then dequeue and forward
+ * any previously enqueued packets
*/
- if (unlikely(ol == NULL)) {
- for (; j < nb_rx; j++) {
- rte_pktmbuf_free(pkts_burst[j]);
- port_statistics[portid].dropped++;
- }
- break;
+ if (rte_crypto_op_bulk_alloc(
+ l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, nb_rx) !=
+ nb_rx) {
+ for (j = 0; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+
+ nb_rx = 0;
}
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ /* Enqueue packets from Crypto device*/
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m,
+ ops_burst[j], cparams);
+ }
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
-
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
+
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym->m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
@@ -748,8 +765,8 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_sym_key *key __rte_unused,
- unsigned length __rte_unused, char *arg __rte_unused)
+parse_key(struct l2fwd_key *key __rte_unused, unsigned length __rte_unused,
+ char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
return -1;
@@ -759,26 +776,20 @@ parse_key(struct rte_crypto_sym_key *key __rte_unused,
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- if (strcmp("SHA1", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA1;
+ if (strcmp("MD5_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_MD5_HMAC;
return 0;
} else if (strcmp("SHA1_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
return 0;
- } else if (strcmp("SHA224", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA224;
- return 0;
} else if (strcmp("SHA224_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
return 0;
- } else if (strcmp("SHA256", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
- return 0;
} else if (strcmp("SHA256_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
return 0;
- } else if (strcmp("SHA512", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
+ } else if (strcmp("SHA384_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
return 0;
} else if (strcmp("SHA512_HMAC", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ *algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
@@ -809,7 +820,7 @@ static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
struct option *lgopts, int option_index)
{
- if (strcmp(lgopts[option_index].name, "cdev_type") == 0)
+ if (strcmp(lgopts[option_index].name, "cdev") == 0)
return parse_cryptodev_type(&options->cdev_type, optarg);
else if (strcmp(lgopts[option_index].name, "chain") == 0)
@@ -824,11 +835,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_cipher_op(&options->cipher_xform.cipher.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "cipher_key") == 0)
- return parse_key(&options->cipher_xform.cipher.key,
- sizeof(options->ckey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->ckey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "iv") == 0)
+ options->cipher_xform.cipher.key.data = key.data;
+ options->cipher_xform.cipher.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "iv") == 0)
return parse_key(&options->iv_key, sizeof(options->ivkey_data),
optarg);
@@ -841,11 +858,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_auth_op(&options->auth_xform.auth.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
- return parse_key(&options->auth_xform.auth.key,
- sizeof(options->akey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->akey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
+ options->auth_xform.auth.key.data = key.data;
+ options->auth_xform.auth.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
options->sessionless = 1;
return 0;
}
@@ -905,16 +928,16 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
const char *q_arg)
{
char *end = NULL;
- long int n;
+ unsigned long n;
/* parse number string */
- n = strtol(q_arg, &end, 10);
+ n = strtoul(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
n = 0;
if (n >= MAX_TIMER_PERIOD) {
- printf("Warning refresh period specified %ld is greater than "
- "max value %d! using max value",
+ printf("Warning refresh period specified %lu is greater than "
+ "max value %lu! using max value",
n, MAX_TIMER_PERIOD);
n = MAX_TIMER_PERIOD;
}
@@ -934,13 +957,13 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->nb_ports_per_lcore = 1;
options->refresh_period = 10000;
options->single_lcore = 0;
+ options->sessionless = 0;
options->cdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
- options->sessionless = 0;
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
options->cipher_xform.next = NULL;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -949,12 +972,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->ckey_data, sizeof(options->ckey_data));
options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
options->cipher_xform.cipher.key.length = 16;
/* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
options->auth_xform.next = NULL;
options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -966,7 +988,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->akey_data, sizeof(options->akey_data));
options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
options->auth_xform.auth.key.length = 20;
}
@@ -993,39 +1014,6 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
printf("sessionless crypto: %s\n",
options->sessionless ? "enabled" : "disabled");
-#if 0
- options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
-
- /* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
- options->cipher_xform.next = NULL;
-
- options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- generate_random_key(options->ckey_data, sizeof(options->ckey_data));
-
- options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
- options->cipher_xform.cipher.key.length = 16;
-
-
- /* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
- options->auth_xform.next = NULL;
-
- options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
-
- options->auth_xform.auth.add_auth_data_length = 0;
- options->auth_xform.auth.digest_length = 20;
-
- generate_random_key(options->akey_data, sizeof(options->akey_data));
-
- options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
- options->auth_xform.auth.key.length = 20;
-#endif
}
/* Parse the argument given in the command line of the application */
@@ -1039,7 +1027,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
static struct option lgopts[] = {
{ "sessionless", no_argument, 0, 0 },
- { "cdev_type", required_argument, 0, 0 },
+ { "cdev", required_argument, 0, 0 },
{ "chain", required_argument, 0, 0 },
{ "cipher_algo", required_argument, 0, 0 },
@@ -1053,6 +1041,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "iv", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -1370,15 +1359,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
diff --git a/lib/Makefile b/lib/Makefile
index 6840f87..f254dba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index df0c0b8..e0c4fb9 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -44,7 +44,369 @@
extern "C" {
#endif
-#include <rte_crypto_sym.h>
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_crypto_sym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ /**< Undefined operation type */
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ /**< Symmetric operation */
+};
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by a crypto device */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**<
+ * Symmetric operation failed due to invalid session arguments, or if
+ * in session-less mode, failed to allocate private operation material.
+ */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API, PMDs should check the type parameter to
+ * verify that the operation is supported by the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() .
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+ * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+
+ struct rte_mempool *mempool;
+ /**< crypto operation mempool which operation is allocated from */
+
+ phys_addr_t phys_addr;
+ /**< physical address of crypto operation */
+
+ void *opaque_data;
+ /**< Opaque pointer for user data */
+
+ union {
+ struct rte_crypto_sym_op *sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+} __rte_cache_aligned;
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->type = type;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ /** Symmetric operation structure starts after the end of the
+ * rte_crypto_op structure.
+ */
+ op->sym = (struct rte_crypto_sym_op *)(op + 1);
+ op->type = type;
+
+ __rte_crypto_sym_op_reset(op->sym);
+ break;
+ default:
+ break;
+ }
+
+ op->opaque_data = NULL;
+}
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ /**< Crypto op pool type operation. */
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Bulk allocate raw elements from mempool and return them as crypto operations
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns number of ops allocated; -EINVAL on operation type mismatch; 0 if the mempool is exhausted
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ priv = rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != type &&
+ priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+ return -EINVAL;
+
+ if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+ return nb_ops;
+
+ return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = NULL;
+ int retval;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+ if (unlikely(retval != 1))
+ return NULL;
+
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns the number of crypto operations allocated (nb_ops)
+ * - On failure returns 0
+ */
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+ != nb_ops))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_crypto_op_reset(ops[i], type);
+
+ return nb_ops;
+}
+
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+
+ return NULL;
+}
+
+/**
+ * Free crypto operation structure.
+ * If the operation has been allocated from a rte_mempool, then the operation
+ * will be returned to the mempool.
+ *
+ * @param op crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf which is associated with the crypto operation, the
+ * operation will be allocated in the private data of that
+ * mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /*
+ * check that the mbuf's private data size is sufficient to contain a
+ * crypto operation
+ */
+ if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op))))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym->m_src = m;
+
+ return op;
+}
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ void *priv_data;
+ uint32_t size;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return NULL;
+
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+ nb_xforms);
+}
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param sess cryptodev session
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -1;
+
+ return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index cb2b8f6..831bbf1 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -46,6 +46,8 @@
extern "C" {
#endif
+#include <string.h>
+
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -346,37 +348,24 @@ enum rte_crypto_sym_op_sess_type {
};
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
+struct rte_cryptodev_sym_session;
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
+ *
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
- * call for performing cipher, hash, or a combined hash and cipher operations.
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
+ * which contains the source data which the crypto operation is to be performed
+ * on.
*/
struct rte_crypto_sym_op {
- enum rte_crypto_sym_op_sess_type type;
- enum rte_crypto_op_status status;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ enum rte_crypto_sym_op_sess_type type;
union {
struct rte_cryptodev_sym_session *session;
@@ -387,7 +376,7 @@ struct rte_crypto_sym_op {
struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -395,7 +384,7 @@ struct rte_crypto_sym_op {
* this location.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -414,17 +403,68 @@ struct rte_crypto_sym_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The @ref aad field
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -432,180 +472,169 @@ struct rte_crypto_sym_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+ * ignored. The @ref aad field should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_sym_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+ * rte_cryptodev_sym_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint16_t length; /**< Length of digest */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
+} __rte_cache_aligned;
/**
- * Reset the fields of a crypto operation to their default values.
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
+ memset(op, 0, sizeof(*op));
+
op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
}
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+ void *priv_data, uint8_t nb_xforms)
+{
+ struct rte_crypto_sym_xform *xform;
+
+ sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param op crypto operation
+ * @param sess cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
struct rte_cryptodev_sym_session *sess)
{
- op->session = sess;
- op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+ sym_op->session = sess;
+ sym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+ return 0;
}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index c7fef6a..4632ca3 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1056,3 +1056,79 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
return NULL;
}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ unsigned elt_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ priv_size;
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->priv_size < priv_size) {
+ mp = NULL;
+ CDEV_LOG_ERR("Mempool %s already exists but with "
+ "incompatible parameters", name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ CDEV_LOG_ERR("Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 732e2b9..d9b10b4 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -40,16 +40,14 @@
* Defines RTE Crypto Device APIs for the provisioning of cipher and
* authentication operations.
*
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include "stddef.h"
-
#include "rte_crypto.h"
#include "rte_dev.h"
@@ -58,15 +56,18 @@ extern "C" {
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
-/**< Intel QAT SYM PMD device name */
+/**< Intel QAT Symmetric Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
+
+extern const char **rte_cyptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -214,8 +215,6 @@ struct rte_cryptodev_config {
/**
* Configure a device.
*
- * EXPERIMENTAL: this API file may change without prior notice
- *
* This function must be invoked first before any other function in the
* API. This function can also be re-invoked when a device is in the
* stopped state.
@@ -411,12 +410,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -489,66 +488,65 @@ struct rte_cryptodev_data {
extern struct rte_cryptodev *rte_cryptodevs;
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operation are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
/**
- * Enqueue a burst of packets for processing on a crypto device.
+ * Enqueue a burst of operations for processing on a crypto device.
*
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
- *
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -556,25 +554,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto device's queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
@@ -620,7 +618,6 @@ extern struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(uint8_t dev_id,
struct rte_crypto_sym_xform *xform);
-
/**
* Free the memory associated with a previously allocated session.
*
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index a46af6f..b682184 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -1,4 +1,4 @@
-DPDK_2.2 {
+DPDK_16.04 {
global:
rte_cryptodevs;
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
-};
+};
\ No newline at end of file
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index e39ad28..52f96c3 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
diff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile
deleted file mode 100644
index acdb449..0000000
--- a/lib/librte_mbuf_offload/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c
deleted file mode 100644
index 5c0c9dd..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
deleted file mode 100644
index 5ce6058..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operations
- * parameter's inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO_SYM
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_sym_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbug offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- (struct rte_pktmbuf_offload_pool_private *)rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success retruns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload strucutre to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO_SYM:
- __rte_crypto_sym_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_sym_xform *
-rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_sym_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
deleted file mode 100644
index 3d3b06a..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v7 0/2] cryptodev API changes
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Fiona Trahe
@ 2016-03-07 11:50 ` Fiona Trahe
2016-03-07 13:23 ` De Lara Guarch, Pablo
` (8 more replies)
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 1/2] cryptodev: API tidy and changes to support future extensions Fiona Trahe
` (2 subsequent siblings)
3 siblings, 9 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-07 11:50 UTC (permalink / raw)
To: dev
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
v7:
- remove trailing spaces introduced in v6
- rebase against recent config file changes
v6:
- restore EXPERIMENTAL label to cryptodev. Will handle removal in separate thread.
(email subject was incorrect in v5, so v5 hasn't arrived in patchwork, therefore v6 is in-reply-to v4 message id)
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patcheset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (2):
cryptodev: API tidy and changes to support future extensions
cryptodev: change burst API to be crypto op oriented
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_base | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
26 files changed, 2145 insertions(+), 2016 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v7 1/2] cryptodev: API tidy and changes to support future extensions
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Fiona Trahe
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
@ 2016-03-07 11:50 ` Fiona Trahe
2016-03-08 14:10 ` Thomas Monjalon
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto op oriented Fiona Trahe
2016-03-15 6:46 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Cao, Min
3 siblings, 1 reply; 62+ messages in thread
From: Fiona Trahe @ 2016-03-07 11:50 UTC (permalink / raw)
To: dev
This patch splits symmetric specific definitions and
functions away from the common crypto APIs to facilitate the future extension
and expansion of the cryptodev framework, in order to allow asymmetric
crypto operations to be introduced at a later date, as well as to clean the
logical structure of the public includes. The patch also introduces the _sym
prefix to symmetric specific structure and functions to improve clarity in
the API.
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
app/test/test_cryptodev.c | 164 +++---
app/test/test_cryptodev_perf.c | 79 +--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 44 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 6 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 51 +-
drivers/crypto/qat/qat_crypto.h | 10 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 33 +-
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 563 +------------------
lib/librte_cryptodev/rte_crypto_sym.h | 613 +++++++++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 39 +-
lib/librte_cryptodev/rte_cryptodev.h | 80 ++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 6 +-
lib/librte_mbuf_offload/rte_mbuf_offload.h | 22 +-
17 files changed, 915 insertions(+), 840 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 62f8fb0..951b443 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,13 +57,13 @@ struct crypto_testsuite_params {
};
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
struct rte_mbuf_offload *ol;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -78,7 +78,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_param);
@@ -165,7 +165,8 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -220,7 +221,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
@@ -275,7 +276,7 @@ ut_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
ts_params->conf.session_mp.nb_objs =
- (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+ (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
DEFAULT_NUM_OPS_INFLIGHT :
DEFAULT_NUM_OPS_INFLIGHT;
@@ -319,7 +320,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
ut_params->sess);
ut_params->sess = NULL;
}
@@ -464,7 +465,7 @@ test_queue_pair_descriptor_setup(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf), "Failed to configure cryptodev %u",
@@ -766,7 +767,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -776,7 +777,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -786,20 +787,21 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -862,18 +864,18 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+ TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
ut_params->ol, 2),
"failed to allocate space for crypto transforms");
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -882,7 +884,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -958,7 +960,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA1);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -967,7 +969,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -977,13 +979,14 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -991,7 +994,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1066,7 +1069,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1075,7 +1078,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1085,13 +1088,14 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1099,7 +1103,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1168,7 +1172,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA256);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1177,7 +1181,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1187,13 +1191,14 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1201,7 +1206,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1282,7 +1287,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1291,7 +1296,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1301,15 +1306,16 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1317,7 +1323,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1371,7 +1377,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
@@ -1386,8 +1392,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
"Failed to create session params");
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
@@ -1400,7 +1407,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
{
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1409,7 +1416,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1422,7 +1429,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params)
{
@@ -1442,7 +1449,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1450,7 +1457,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1522,7 +1529,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1531,7 +1538,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1541,13 +1548,14 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1555,7 +1563,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)
rte_pktmbuf_prepend(ut_params->ibuf,
@@ -1615,7 +1623,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_AES_XCBC);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1624,7 +1632,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1634,13 +1642,14 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1648,7 +1657,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
@@ -1752,7 +1761,7 @@ test_multi_session(void)
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_cryptodev_info dev_info;
- struct rte_cryptodev_session **sessions;
+ struct rte_cryptodev_sym_session **sessions;
uint16_t i;
@@ -1761,12 +1770,13 @@ test_multi_session(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
- dev_info.max_nb_sessions) + 1, 0);
+ sessions = rte_malloc(NULL,
+ (sizeof(struct rte_cryptodev_sym_session *) *
+ dev_info.sym.max_nb_sessions) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.max_nb_sessions; i++) {
- sessions[i] = rte_cryptodev_session_create(
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(sessions[i],
@@ -1781,13 +1791,13 @@ test_multi_session(void)
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.max_nb_sessions; i++)
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
sessions[i]);
rte_free(sessions);
@@ -1806,7 +1816,7 @@ test_not_in_place_crypto(void)
/* Create multiple crypto sessions*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0], &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1828,7 +1838,7 @@ test_not_in_place_crypto(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1836,7 +1846,7 @@ test_not_in_place_crypto(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1962,7 +1972,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static struct test_command cryptodev_qat_cmd = {
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 728bcf0..42dd9bc 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -63,12 +63,12 @@ struct crypto_testsuite_params {
#define MAX_NUM_OF_OPS_PER_UT (128)
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
@@ -127,7 +127,7 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -179,7 +179,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
@@ -252,7 +252,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess)
- rte_cryptodev_session_free(ts_params->dev_id,
+ rte_cryptodev_sym_session_free(ts_params->dev_id,
ut_params->sess);
/* free crypto operation structure */
@@ -1713,7 +1713,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1723,7 +1723,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1733,7 +1733,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1741,7 +1741,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < num_to_submit ; b++) {
tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ (const char *)data_params[0].expected.ciphertext,
data_params[0].length, 0);
TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
@@ -1753,12 +1753,12 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
DIGEST_BYTE_LENGTH_SHA256);
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+ ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
@@ -1814,7 +1814,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1824,10 +1825,13 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
total_cycles += end_cycles - start_cycles;
}
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1876,7 +1880,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1885,7 +1889,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1895,7 +1899,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1928,12 +1932,12 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1957,15 +1961,19 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, tx_mbufs,
+ ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
+ < DEFAULT_BURST_SIZE) ?
+ DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
+ DEFAULT_BURST_SIZE);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1973,10 +1981,13 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num,
+ 0, NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num, 0,
rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1987,7 +1998,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
mmps = (double)num_received*mhz/(end_cycles - start_cycles);
throughput = mmps*data_params[index].length*8;
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
+ data_params[index].length, num_sent, num_received);
printf("\t%.2f\t%u", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
@@ -2044,7 +2055,7 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index c7d884c..61d93cd 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -109,7 +109,7 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
@@ -119,12 +119,12 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
@@ -134,11 +134,11 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
@@ -196,11 +196,11 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
@@ -259,10 +259,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,11 +296,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
@@ -313,7 +313,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
return NULL;
sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
sess, crypto_op->xform) != 0)) {
@@ -339,7 +339,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
@@ -432,14 +433,14 @@ static struct rte_mbuf *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -459,7 +460,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
@@ -511,7 +512,8 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
qp->qp_stats.enqueue_err_count++;
goto flush_jobs;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index e43b898..3cd9990 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -108,7 +108,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->dev_type = dev->dev_type;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->max_nb_sessions = internals->max_nb_sessions;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
}
}
@@ -243,7 +243,7 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
/** Configure a aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform, void *sess)
{
struct aesni_mb_private *internals = dev->data->dev_private;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..ab70c15 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -218,7 +218,7 @@ struct aesni_mb_session {
extern int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform);
+ const struct rte_crypto_sym_xform *xform);
/** device specific operations function pointer structure */
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 828756b..e7b9027 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,37 +89,37 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
}
static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
if (xform->next == NULL)
return -1;
/* Cipher Only */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
/* Authentication Only */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
/* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
/* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
return -1;
}
static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return &xform->auth;
xform = xform->next;
@@ -129,10 +129,10 @@ qat_get_auth_xform(struct rte_crypto_xform *xform)
}
static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return &xform->cipher;
xform = xform->next;
@@ -144,7 +144,7 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform)
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -261,7 +261,7 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
auth_xform->digest_length))
goto error_out;
- return (struct rte_cryptodev_session *)session;
+ return (struct rte_crypto_sym_session *)session;
error_out:
rte_mempool_put(internals->sess_mp, session);
@@ -275,7 +275,7 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
register struct qat_queue *queue;
@@ -327,7 +327,8 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct rte_mbuf_offload *ol;
struct qat_queue *queue;
@@ -343,12 +344,13 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_pkts) {
rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+ ol = rte_pktmbuf_offload_get(rx_mbuf,
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ ol->op.crypto.status =
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
@@ -384,20 +386,21 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
"to (%p) mbuf.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests mbuf (%p) is sessionless.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(ol->op.crypto.session->type
+ != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
@@ -520,8 +523,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
- info->max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..e9f71fe 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,16 +109,18 @@ qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
extern void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform, void *session_private);
extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..85700fc 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,11 +89,11 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->pci_dev->addr.devid,
cryptodev->pci_dev->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index a950b74..93de786 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,13 +124,13 @@ struct l2fwd_crypto_options {
enum l2fwd_crypto_xform_chain xform_chain;
- struct rte_crypto_xform cipher_xform;
+ struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_key iv_key;
+ struct rte_crypto_sym_key iv_key;
uint8_t ivkey_data[16];
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform auth_xform;
uint8_t akey_data[128];
};
@@ -142,8 +142,11 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct rte_crypto_key iv_key;
- struct rte_cryptodev_session *session;
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv;
+ struct rte_cryptodev_sym_session *session;
};
/** lcore configuration */
@@ -372,7 +375,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
/* Append space for digest to end of packet */
ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -474,11 +477,11 @@ generate_random_key(uint8_t *key, unsigned length)
key[i] = rand() % 0xff;
}
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
uint8_t cdev_id)
{
- struct rte_crypto_xform *first_xform;
+ struct rte_crypto_sym_xform *first_xform;
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
@@ -489,7 +492,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options,
}
/* Setup Cipher Parameters */
- return rte_cryptodev_session_create(cdev_id, first_xform);
+ return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}
static void
@@ -610,7 +613,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
m = pkts_burst[j];
ol = rte_pktmbuf_offload_alloc(
l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
/*
* If we can't allocate a offload, then drop
* the rest of the burst and dequeue and
@@ -689,7 +692,7 @@ parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg)
*type = RTE_CRYPTODEV_AESNI_MB_PMD;
return 0;
} else if (strcmp("QAT", optarg) == 0) {
- *type = RTE_CRYPTODEV_QAT_PMD;
+ *type = RTE_CRYPTODEV_QAT_SYM_PMD;
return 0;
}
@@ -745,7 +748,7 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_key *key __rte_unused,
+parse_key(struct rte_crypto_sym_key *key __rte_unused,
unsigned length __rte_unused, char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
@@ -982,7 +985,7 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
switch (options->cdev_type) {
case RTE_CRYPTODEV_AESNI_MB_PMD:
printf("cryptodev type: AES-NI MB PMD\n"); break;
- case RTE_CRYPTODEV_QAT_PMD:
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
printf("cryptodev type: QAT PMD\n"); break;
default:
break;
@@ -1179,7 +1182,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports)
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
int retval;
- if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+ if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
if (rte_cryptodev_count() < nb_ports)
return -1;
} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 81fa3fc..0d59229 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -45,6 +45,7 @@ SRCS-y += rte_cryptodev.c
# export include files
SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..df0c0b8 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,572 +36,15 @@
/**
* @file rte_crypto.h
*
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
*
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
- RTE_CRYPTO_CIPHER_NULL = 1,
- /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
- RTE_CRYPTO_CIPHER_3DES_CBC,
- /**< Triple DES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_3DES_CTR,
- /**< Triple DES algorithm in CTR mode */
- RTE_CRYPTO_CIPHER_3DES_ECB,
- /**< Triple DES algorithm in ECB mode */
-
- RTE_CRYPTO_CIPHER_AES_CBC,
- /**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
- RTE_CRYPTO_CIPHER_AES_CTR,
- /**< AES algorithm in Counter mode */
- RTE_CRYPTO_CIPHER_AES_ECB,
- /**< AES algorithm in ECB mode */
- RTE_CRYPTO_CIPHER_AES_F8,
- /**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* element of the
- * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_setup_data* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation.
- */
- RTE_CRYPTO_CIPHER_AES_XTS,
- /**< AES algorithm in XTS mode */
-
- RTE_CRYPTO_CIPHER_ARC4,
- /**< (A)RC4 cipher algorithm */
-
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- /**< Kasumi algorithm in F8 mode */
-
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- /**< SNOW3G algorithm in UEA2 mode */
-
- RTE_CRYPTO_CIPHER_ZUC_EEA3
- /**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- /**< Encrypt cipher operation */
- RTE_CRYPTO_CIPHER_OP_DECRYPT
- /**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- * use to create a session.
- */
-struct rte_crypto_cipher_xform {
- enum rte_crypto_cipher_operation op;
- /**< This parameter determines if the cipher operation is an encrypt or
- * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
- * only encrypt operations are valid.
- */
- enum rte_crypto_cipher_algorithm algo;
- /**< Cipher algorithm */
-
- struct rte_crypto_key key;
- /**< Cipher key
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
- * point to a concatenation of the AES encryption key followed by a
- * keymask. As per RFC3711, the keymask should be padded with trailing
- * bytes to match the length of the encryption key used.
- *
- * For AES-XTS mode of operation, two keys must be provided and
- * key.data must point to the two keys concatenated together (Key1 ||
- * Key2). The cipher key length will contain the total size of both
- * keys.
- *
- * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
- * 192 bits (24 bytes) or 256 bits (32 bytes).
- *
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
- * should be set to the combined length of the encryption key and the
- * keymask. Since the keymask and the encryption key are the same size,
- * key.length should be set to 2 x the AES encryption key length.
- *
- * For the AES-XTS mode of operation:
- * - Two keys must be provided and key.length refers to total length of
- * the two keys.
- * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size.
- **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
- RTE_CRYPTO_AUTH_NULL = 1,
- /**< NULL hash algorithm. */
-
- RTE_CRYPTO_AUTH_AES_CBC_MAC,
- /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_CMAC,
- /**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- /**< AES XCBC algorithm. */
-
- RTE_CRYPTO_AUTH_KASUMI_F9,
- /**< Kasumi algorithm in F9 mode. */
-
- RTE_CRYPTO_AUTH_MD5,
- /**< MD5 algorithm */
- RTE_CRYPTO_AUTH_MD5_HMAC,
- /**< HMAC using MD5 algorithm */
-
- RTE_CRYPTO_AUTH_SHA1,
- /**< 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA1_HMAC,
- /**< HMAC using 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224,
- /**< 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224_HMAC,
- /**< HMAC using 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256,
- /**< 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256_HMAC,
- /**< HMAC using 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384,
- /**< 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384_HMAC,
- /**< HMAC using 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512,
- /**< 512 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512_HMAC,
- /**< HMAC using 512 bit SHA algorithm. */
-
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- /**< SNOW3G algorithm in UIA2 mode. */
-
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- /**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
- RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
- RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
- enum rte_crypto_auth_operation op;
- /**< Authentication operation type */
- enum rte_crypto_auth_algorithm algo;
- /**< Authentication algorithm selection */
-
- struct rte_crypto_key key; /**< Authentication key data.
- * The authentication key length MUST be less than or equal to the
- * block size of the algorithm. It is the callers responsibility to
- * ensure that the key length is compliant with the standard being used
- * (for example RFC 2104, FIPS 198a).
- */
-
- uint32_t digest_length;
- /**< Length of the digest to be returned. If the verify option is set,
- * this specifies the length of the digest to be compared for the
- * session.
- *
- * If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_session_create* or by the
- * *rte_cryptodev_enqueue_burst* if using session-less APIs.
- */
-
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in the message length to hash field of
- * the rte_crypto_op_data structure.
- */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
- RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
- RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
- struct rte_crypto_xform *next; /**< next xform in chain */
-
- enum rte_crypto_xform_type type; /**< xform type */
- union {
- struct rte_crypto_auth_xform auth;
- /**< Authentication / hash xform */
- struct rte_crypto_cipher_xform cipher;
- /**< Cipher xform */
- };
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
- RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
-
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
- enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
-
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
-
- union {
- struct rte_cryptodev_session *session;
- /**< Handle for the initialised session context */
- struct rte_crypto_xform *xform;
- /**< Session-less API crypto operation parameters */
- };
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- */
- } to_cipher; /**< Data offsets and length for ciphering */
-
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field specifies the start
- * of the AAD data in the source buffer.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field specifies the length of
- * the AAD data in the source buffer.
- */
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
-
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param op The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
- op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
- struct rte_cryptodev_session *sess)
-{
- op->session = sess;
- op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 0000000..cb2b8f6
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,613 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CCM,
+ /**< AES algorithm in CCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation
+ */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_GCM,
+ /**< AES algorithm in GCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< Kasumi algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ /**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * use to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * For AES-XTS mode of operation, two keys must be provided and
+ * key.data must point to the two keys concatenated together (Key1 ||
+ * Key2). The cipher key length will contain the total size of both
+ * keys.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the CCM mode of operation, the only supported key length is 128
+ * bits (16 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ **/
+};
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CCM,
+ /**< AES algorithm in CCM mode. This is an authenticated cipher. When
+ * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+ * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+ * set up the related rte_crypto_cipher_xform structure in the
+ * session context or the corresponding parameter in the crypto
+ * operation data structures op_params parameter MUST be set for a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GCM,
+ /**< AES algorithm in GCM mode. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< Kasumi algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 160 bit SHA-1 algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 160 bit SHA-1 algorithm. */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transforms. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the callers responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ uint32_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated. If the value is greater than the
+ * maximum length allowed by the hash then an error will be generated
+ * by *rte_cryptodev_sym_session_create* or by the
+ * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ */
+
+ uint32_t add_auth_data_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * The maximum permitted value is 240 bytes, unless otherwise specified
+ * below.
+ *
+ * This field must be specified when the hash algorithm is one of the
+ * following:
+ *
+ * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+ * length of the IV (which should be 16).
+ *
+ * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+ * the length of the Additional Authenticated Data (called A, in NIST
+ * SP800-38D).
+ *
+ * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+ * the length of the associated data (called A, in NIST SP800-38C).
+ * Note that this does NOT include the length of any padding, or the
+ * 18 bytes reserved at the start of the above field to store the
+ * block B0 and the encoded length. The maximum permitted value in
+ * this case is 222 bytes.
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+ * this field is not used and should be set to 0. Instead the length
+ * of the AAD data is specified in the message length to hash field of
+ * the rte_crypto_sym_op structure.
+ */
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required, multiple transforms
+ * can be chained together to specify a chain transforms such as authentication
+ * then cipher, or cipher then authentication. Each transform structure can
+ * hold a single transform, the type field is used to specify which transform
+ * is contained within the union
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type;
+ /**< xform type */
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ };
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+ RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
+ /**< Operation not yet submitted to a cryptodev */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
+ * call for performing cipher, hash, or a combined hash and cipher operations.
+ */
+struct rte_crypto_sym_op {
+ enum rte_crypto_sym_op_sess_type type;
+ enum rte_crypto_op_status status;
+
+ struct {
+ struct rte_mbuf *m; /**< Destination mbuf */
+ uint8_t offset; /**< Data offset */
+ } dst;
+
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing, specified
+ * as number of bytes from start of data in the source
+ * buffer. The result of the cipher operation will be
+ * written back into the output buffer starting at
+ * this location.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ * if a block cipher is being used. This is also the
+ * same as the result length.
+ *
+ * @note
+ * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+ * this value should not include the length of the
+ * padding or the length of the MAC; the driver will
+ * compute the actual number of bytes over which the
+ * encryption will occur, which will include these
+ * values.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+ * field should be set to 0.
+ */
+ } to_cipher; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+ * mode of operation, this field specifies the start
+ * of the AAD data in the source buffer.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+ * of operation, this field specifies the length of
+ * the AAD data in the source buffer.
+ */
+ } to_hash; /**< Data offsets and length for authentication */
+ } data; /**< Details of data to be operated on */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+ * Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length is 96
+ * bits) or J0 (for other sizes), where J0 is as defined by
+ * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+ * needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the nonce
+ * should be written starting at &iv[1] (to allow space for the
+ * implementation to write in the flags in the first byte).
+ * Note that a full 16 bytes should be allocated, even though
+ * the length field will have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+ * 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD be
+ * 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ size_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the length of the
+ * IV (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length of the
+ * counter (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+ * which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce, which can
+ * be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result should be
+ * inserted (in the case of digest generation) or where the
+ * purported digest exists (in the case of digest
+ * verification).
+ *
+ * At session creation time, the client specified the digest
+ * result length with the digest_length member of the @ref
+ * rte_crypto_auth_xform structure. For physical crypto
+ * devices the caller must allocate at least digest_length of
+ * physically contiguous memory at this location.
+ *
+ * For digest generation, the digest result will overwrite
+ * any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is understood
+ * to be in the destination buffer for digest generation, and
+ * in the source buffer for digest verification. The location
+ * of the digest result in this case is immediately following
+ * the region over which the digest is computed.
+ */
+ phys_addr_t phys_addr; /**< Physical address of digest */
+ uint32_t length; /**< Length of digest */
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD) needed for
+ * authenticated cipher mechanisms (CCM and GCM), and to the IV
+ * for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is set up
+ * for the session in the @ref rte_crypto_auth_xform structure
+ * as part of the @ref rte_cryptodev_sym_session_create function
+ * call. This length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+ * caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset of one
+ * byte into the array, leaving room for the implementation
+ * to write in the flags to the first byte.
+ *
+ * - the additional authentication data itself should be
+ * written starting at an offset of 18 bytes into the array,
+ * leaving room for the length encoding in the first two
+ * bytes of the second block.
+ *
+ * - the array should be big enough to hold the above fields,
+ * plus any padding to round this up to the nearest multiple
+ * of the block size (16 bytes). Padding will be added by
+ * the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set to 0.
+ * Instead the AAD data should be placed in the source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint32_t length; /**< Length of additional authenticated data */
+ } additional_auth;
+ /**< Additional authentication parameters */
+
+ struct rte_mempool *pool;
+ /**< mempool used to allocate crypto op */
+
+ void *user_data;
+ /**< opaque pointer for user data */
+};
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+ op->dst.m = NULL;
+ op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ op->session = sess;
+ op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 2838852..c7fef6a 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -572,8 +572,8 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
@@ -604,8 +604,10 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
- config->session_mp.cache_size, config->socket_id);
+ return rte_cryptodev_sym_session_pool_create(dev,
+ config->session_mp.nb_objs,
+ config->session_mp.cache_size,
+ config->socket_id);
}
@@ -911,12 +913,12 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
void *opaque_arg,
void *_sess,
__rte_unused unsigned i)
{
- struct rte_cryptodev_session *sess = _sess;
+ struct rte_cryptodev_sym_session *sess = _sess;
struct rte_cryptodev *dev = opaque_arg;
memset(sess, 0, mp->elt_size);
@@ -930,8 +932,8 @@ rte_crypto_session_init(struct rte_mempool *mp,
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned priv_sess_size;
@@ -951,7 +953,7 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return -ENOMEM;
}
- unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+ unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
priv_sess_size;
dev->data->session_pool = rte_mempool_lookup(mp_name);
@@ -975,7 +977,8 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
0, /* private data size */
NULL, /* obj initialization constructor */
NULL, /* obj initialization constructor arg */
- rte_crypto_session_init, /* obj constructor */
+ rte_cryptodev_sym_session_init,
+ /**< obj constructor*/
dev, /* obj constructor arg */
socket_id, /* socket id */
0); /* flags */
@@ -990,11 +993,12 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return 0;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
void *_sess;
if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
@@ -1010,7 +1014,7 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return NULL;
}
- sess = (struct rte_cryptodev_session *)_sess;
+ sess = (struct rte_cryptodev_sym_session *)_sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1026,8 +1030,9 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return sess;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..732e2b9 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,14 +57,14 @@ extern "C" {
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
+/**< Intel QAT SYM PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
};
/* Logging Macros */
@@ -99,8 +99,11 @@ struct rte_cryptodev_info {
unsigned max_nb_queue_pairs;
/**< Maximum number of queues pairs supported by device. */
- unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device. */
+ } sym;
};
#define RTE_CRYPTODEV_DETACHED (0)
@@ -130,17 +133,6 @@ struct rte_cryptodev_qp_conf {
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
enum rte_cryptodev_event_type event, void *cb_arg);
-#ifdef RTE_CRYPTODEV_PERF
-/**
- * Crypto Device performance counter statistics structure. This structure is
- * used for RDTSC counters for measuring crypto operations.
- */
-struct rte_cryptodev_perf_stats {
- uint64_t t_accumlated; /**< Accumulated time processing operation */
- uint64_t t_min; /**< Max time */
- uint64_t t_max; /**< Min time */
-};
-#endif
/** Crypto Device statistics */
struct rte_cryptodev_stats {
@@ -153,29 +145,8 @@ struct rte_cryptodev_stats {
/**< Total error count on operations enqueued */
uint64_t dequeue_err_count;
/**< Total error count on operations dequeued */
+};
-#ifdef RTE_CRYPTODEV_DETAILED_STATS
- struct {
- uint64_t encrypt_ops; /**< Count of encrypt operations */
- uint64_t encrypt_bytes; /**< Number of bytes encrypted */
-
- uint64_t decrypt_ops; /**< Count of decrypt operations */
- uint64_t decrypt_bytes; /**< Number of bytes decrypted */
- } cipher; /**< Cipher operations stats */
-
- struct {
- uint64_t generate_ops; /**< Count of generate operations */
- uint64_t bytes_hashed; /**< Number of bytes hashed */
-
- uint64_t verify_ops; /**< Count of verify operations */
- uint64_t bytes_verified;/**< Number of bytes verified */
- } hash; /**< Hash operations stats */
-#endif
-
-#ifdef RTE_CRYPTODEV_PERF
- struct rte_cryptodev_perf_stats op_perf; /**< Operations stats */
-#endif
-} __rte_cache_aligned;
/**
* Create a virtual crypto device
@@ -607,6 +578,23 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
}
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+ struct {
+ uint8_t dev_id;
+ /**< Device Id */
+ enum rte_cryptodev_type type;
+ /**< Crypto Device type session created on */
+ struct rte_mempool *mp;
+ /**< Mempool session allocated from */
+ } __rte_aligned(8);
+ /**< Public symmetric session details */
+
+ char _private[0];
+ /**< Private session material */
+};
+
+
/**
* Initialise a session for symmetric cryptographic operations.
*
@@ -628,9 +616,9 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
* @return
* Pointer to the created session or NULL
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
- struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform);
/**
@@ -638,15 +626,15 @@ rte_cryptodev_session_create(uint8_t dev_id,
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
- * *rte_cryptodev_session_create*.
+ * *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
- struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *session);
#ifdef __cplusplus
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 8270afa..7d049ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,11 +56,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-struct rte_cryptodev_stats;
-struct rte_cryptodev_info;
-struct rte_cryptodev_qp_conf;
-
-enum rte_cryptodev_event_type;
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define RTE_PMD_DEBUG_TRACE(...) \
@@ -80,7 +75,6 @@ struct rte_cryptodev_session {
};
struct rte_cryptodev_driver;
-struct rte_cryptodev;
/**
* Initialisation function of a crypto driver invoked for each matching
@@ -364,7 +358,7 @@ typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
* - On success returns a pointer to a rte_mempool
* - On failure returns a NULL pointer
*/
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
struct rte_cryptodev *dev, unsigned nb_objs,
unsigned obj_cache_size, int socket_id);
@@ -378,7 +372,7 @@ typedef int (*cryptodev_create_session_pool_t)(
* - On success returns the size of the session structure for device
* - On failure returns 0
*/
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
@@ -392,7 +386,7 @@ typedef unsigned (*cryptodev_get_session_private_size_t)(
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
void *session_private);
/**
@@ -406,14 +400,14 @@ typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
/**
* Free Crypto session.
* @param session Cryptodev session structure to free
*/
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
@@ -427,9 +421,9 @@ struct rte_cryptodev_ops {
cryptodev_info_get_t dev_infos_get; /**< Get device info. */
cryptodev_stats_get_t stats_get;
- /**< Get generic device statistics. */
+ /**< Get device statistics. */
cryptodev_stats_reset_t stats_reset;
- /**< Reset generic device statistics. */
+ /**< Reset device statistics. */
cryptodev_queue_pair_setup_t queue_pair_setup;
/**< Set up a device queue pair. */
@@ -442,13 +436,13 @@ struct rte_cryptodev_ops {
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return private session. */
- cryptodev_initialize_session_t session_initialize;
+ cryptodev_sym_initialize_session_t session_initialize;
/**< Initialization function for private session data */
- cryptodev_configure_session_t session_configure;
+ cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
- cryptodev_free_session_t session_clear;
+ cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
};
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..a46af6f 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -16,8 +16,8 @@ DPDK_2.2 {
rte_cryptodev_pmd_driver_register;
rte_cryptodev_pmd_release_device;
rte_cryptodev_pmd_virtual_dev_init;
- rte_cryptodev_session_create;
- rte_cryptodev_session_free;
+ rte_cryptodev_sym_session_create;
+ rte_cryptodev_sym_session_free;
rte_cryptodev_socket_id;
rte_cryptodev_start;
rte_cryptodev_stats_get;
@@ -29,4 +29,4 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_stop;
local: *;
-};
\ No newline at end of file
+};
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
index 77993b6..5ce6058 100644
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,7 +67,7 @@ extern "C" {
enum rte_mbuf_ol_op_type {
RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
/**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
+ RTE_PKTMBUF_OL_CRYPTO_SYM
/**< Crypto offload operation */
};
@@ -84,7 +84,7 @@ struct rte_mbuf_offload {
enum rte_mbuf_ol_op_type type; /**< offload type */
union {
- struct rte_crypto_op crypto; /**< Crypto operation */
+ struct rte_crypto_sym_op crypto; /**< Crypto operation */
} op;
};
@@ -194,8 +194,8 @@ __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
ol->type = type;
switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
+ case RTE_PKTMBUF_OL_CRYPTO_SYM:
+ __rte_crypto_sym_op_reset(&ol->op.crypto); break;
default:
break;
}
@@ -278,24 +278,24 @@ __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
* - On success returns pointer to first crypto xform in crypto operations chain
* - On failure returns NULL
*/
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
unsigned nb_xforms)
{
- struct rte_crypto_xform *xform;
+ struct rte_crypto_sym_xform *xform;
void *priv_data;
uint16_t size;
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
if (priv_data == NULL)
return NULL;
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+ ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
} while (xform);
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto op oriented
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Fiona Trahe
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 1/2] cryptodev: API tidy and changes to support future extensions Fiona Trahe
@ 2016-03-07 11:50 ` Fiona Trahe
2016-03-08 14:32 ` Thomas Monjalon
2016-03-15 6:46 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Cao, Min
3 siblings, 1 reply; 62+ messages in thread
From: Fiona Trahe @ 2016-03-07 11:50 UTC (permalink / raw)
To: dev
This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
of rte_crypto_op's rather than the current implementation, which operates on
rte_mbuf bursts; this simplifies the burst processing in the crypto PMDs and the
use of crypto operations in general.
The changes also continue the separation of the symmetric operation parameters
from the more general operation parameters; this will simplify the integration of
asymmetric crypto operations in the future.
As well as the changes to the crypto APIs this patch adds functions for managing
rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
tests and sample application to work with the modified APIs and finally
removes the now unused rte_mbuf_offload library.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 804 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 253 +++----
config/common_base | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 171 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 2 +-
drivers/crypto/qat/qat_crypto.c | 123 ++--
drivers/crypto/qat/qat_crypto.h | 12 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 4 +-
examples/l2fwd-crypto/main.c | 283 ++++----
lib/Makefile | 1 -
lib/librte_cryptodev/rte_crypto.h | 364 +++++++++-
lib/librte_cryptodev/rte_crypto_sym.h | 379 +++++-----
lib/librte_cryptodev/rte_cryptodev.c | 76 ++
lib/librte_cryptodev/rte_cryptodev.h | 113 ++-
lib/librte_cryptodev/rte_cryptodev_version.map | 5 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 --------
.../rte_mbuf_offload_version.map | 7 -
24 files changed, 1576 insertions(+), 1522 deletions(-)
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index 628bc05..8d84dda 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -222,10 +222,6 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 951b443..208fc14 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -48,7 +47,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,8 +61,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_mbuf_offload *ol;
- struct rte_crypto_sym_op *op;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -104,7 +102,7 @@ setup_test_string(struct rte_mempool *mpool,
return m;
}
-#if HEX_DUMP
+#ifdef HEX_DUMP
static void
hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
{
@@ -112,27 +110,29 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
-#if HEX_DUMP
+#ifdef HEX_DUMP
hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
-#if HEX_DUMP
+#ifdef HEX_DUMP
if (obuf)
hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -162,13 +162,14 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -253,10 +254,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -326,8 +326,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -680,76 +680,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -776,7 +777,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -786,59 +786,65 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ /* Create crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ /* Set crypto operation authentication parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ /* Set crypto operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -863,60 +869,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -985,42 +997,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* attach symmetric crypto session to crypto operations */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1088,47 +1106,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1197,42 +1219,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1244,7 +1271,6 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
return TEST_SUCCESS;
}
@@ -1312,43 +1338,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1367,7 +1396,6 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
DIGEST_BYTE_LENGTH_SHA512,
"Generated digest data not as expected");
-
return TEST_SUCCESS;
}
@@ -1424,6 +1452,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->auth_xform.auth.key.data = hmac_sha512_key;
ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
+
return TEST_SUCCESS;
}
@@ -1448,43 +1477,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1497,8 +1529,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
-
return TEST_SUCCESS;
}
@@ -1524,10 +1554,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
catch_22_quote, QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
@@ -1548,56 +1574,75 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Set operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ /* Set operation authentication parameters */
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->m_src,
+ CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ rte_pktmbuf_mtod_offset(
+ ut_params->op->sym->m_src, uint8_t *,
+ CIPHER_IV_LENGTH_AES_CBC +
+ QUOTE_512_BYTES),
catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC,
+ gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
+ TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC :
+ DIGEST_BYTE_LENGTH_AES_XCBC,
"Generated digest data not as expected");
return TEST_SUCCESS;
@@ -1614,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC);
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
@@ -1648,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf, QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ rte_memcpy(sym_op->auth.digest.data,
+ catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
+ DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1837,50 +1894,53 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+ sym_op->m_dst = dst_m;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
-
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 034393e..c84ba42 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -32,8 +32,6 @@
#ifndef TEST_CRYPTODEV_H_
#define TEST_CRYPTODEV_H_
-#define HEX_DUMP 0
-
#define FALSE 0
#define TRUE 1
@@ -47,8 +45,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
@@ -62,7 +61,9 @@
#define AES_XCBC_MAC_KEY_SZ (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 (12)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224 (14)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC (12)
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 42dd9bc..b3f4fd9 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -50,7 +49,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -68,8 +67,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_op *op;
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +77,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +90,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
@@ -113,23 +111,24 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -256,8 +255,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1698,11 +1697,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1739,46 +1739,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[0].length;
+
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +1793,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1814,9 +1818,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1824,15 +1827,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1846,16 +1849,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1865,11 +1861,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+ double throughput, mmps;
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1908,7 +1907,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+ "Mrps\tThroughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1918,63 +1917,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+ TEST_ASSERT_NOT_NULL(ut_params->digest
+ , "no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+ struct rte_crypto_op *op = rte_crypto_op_alloc(
+ ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[index].length;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)
+ rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num,
- 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
- < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
- DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -1982,37 +1988,34 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num,
- 0, NULL, 0);
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym->m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
diff --git a/config/common_base b/config/common_base
index 1af28c8..3066e9a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -360,13 +360,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 7a91001..f626386 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 61d93cd..5b26444 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -37,7 +37,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
@@ -296,16 +295,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
void *_sess = NULL;
@@ -316,7 +315,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0)) {
+ sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -338,12 +337,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_sym_op *c_op,
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -372,49 +373,65 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL)
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ sizeof(get_digest_byte_length(job->hash_alg)));
+
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library currently only supports returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
@@ -429,43 +446,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_sym_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
}
- return m;
+ return op;
}
/**
@@ -481,16 +496,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_pkts, (void *)op);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -499,53 +514,53 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
JOB_AES_HMAC *job = NULL;
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i],
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ MB_LOG_ERR("PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
-
- sess = get_session(qp, &ol->op.crypto);
+#endif
+ sess = get_session(_qp, ops[i]);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(_qp, ops[i], sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
/* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ job = (*_qp->ops->job.submit)(&_qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(_qp, job);
}
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ _qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -553,24 +568,24 @@ flush_jobs:
* If we haven't processed any jobs in submit loop, then flush jobs
* queue to stop the output stalling
*/
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
+ job = (*_qp->ops->job.flush_job)(&_qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ _qp->stats.enqueued_count += handle_completed_jobs(_qp, job);
return i;
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
unsigned nb_dequeued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ nb_dequeued = rte_ring_dequeue_burst(_qp->processed_pkts,
+ (void **)ops, nb_ops);
+ _qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3cd9990..d56de12 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index ab70c15..0aed177 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e7b9027..38dc956 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
@@ -72,7 +71,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +274,16 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,19 +294,17 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights;
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
-
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ while (nb_pkts_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
if (nb_pkts_sent == 0)
@@ -316,7 +314,7 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
nb_pkts_sent++;
- cur_tx_pkt++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -327,14 +325,13 @@ kick_tail:
}
uint16_t
-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -342,17 +339,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -361,9 +361,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -377,38 +376,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
- struct rte_mbuf_offload *ol;
-
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
return -EINVAL;
}
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+#endif
+ if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type
- != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
@@ -416,37 +413,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
* - always in place.
*/
qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
-
+ rte_pktmbuf_mtophys(op->sym->m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym->cipher.data.length;
+ cipher_param->cipher_offset = op->sym->cipher.data.offset;
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym->auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->auth_len = op->sym->auth.data.length;
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -457,9 +454,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
#endif
return 0;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index e9f71fe..9323383 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,12 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 85700fc..5e51aca 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 93de786..6d5aeeb 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -85,6 +84,7 @@
*/
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
+
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -112,6 +117,12 @@ enum l2fwd_crypto_xform_chain {
L2FWD_CRYPTO_HASH_CIPHER
};
+struct l2fwd_key {
+ uint8_t *data;
+ uint32_t length;
+ phys_addr_t phys_addr;
+};
+
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -127,7 +138,7 @@ struct l2fwd_crypto_options {
struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_sym_key iv_key;
+ struct l2fwd_key iv_key;
uint8_t ivkey_data[16];
struct rte_crypto_sym_xform auth_xform;
@@ -142,10 +153,7 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct {
- uint8_t *data;
- uint16_t length;
- } iv;
+ struct l2fwd_key iv_key;
struct rte_cryptodev_sym_session *session;
};
@@ -157,14 +165,16 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
@@ -178,7 +188,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -203,7 +213,7 @@ struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
-#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */
/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
@@ -292,20 +302,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -313,7 +324,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -321,23 +333,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -375,43 +387,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_op_attach_sym_session(op, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym->auth.digest.length = cparams->digest_length;
+
+ op->sym->auth.data.offset = ipdata_offset;
+ op->sym->auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym->cipher.iv.data = cparams->iv_key.data;
+ op->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym->cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym->cipher.data.offset = ipdata_offset;
+ op->sym->cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym->m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -433,8 +445,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -443,7 +455,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -503,6 +515,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -563,12 +577,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -597,8 +611,6 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
-
portid = qconf->rx_port_list[i];
cparams = &port_cparams[i];
@@ -608,44 +620,49 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
port_statistics[portid].rx += nb_rx;
- /* Enqueue packets from Crypto device*/
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ if (nb_rx) {
/*
- * If we can't allocate a offload, then drop
+ * If we can't allocate crypto_ops, then drop
* the rest of the burst and dequeue and
* process the packets to free offload structs
*/
- if (unlikely(ol == NULL)) {
- for (; j < nb_rx; j++) {
- rte_pktmbuf_free(pkts_burst[j]);
- port_statistics[portid].dropped++;
- }
- break;
+ if (rte_crypto_op_bulk_alloc(
+ l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, nb_rx) !=
+ nb_rx) {
+ for (j = 0; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+
+ nb_rx = 0;
}
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ /* Enqueue packets from Crypto device*/
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m,
+ ops_burst[j], cparams);
+ }
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
-
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
+
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym->m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
@@ -748,8 +765,8 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_sym_key *key __rte_unused,
- unsigned length __rte_unused, char *arg __rte_unused)
+parse_key(struct l2fwd_key *key __rte_unused, unsigned length __rte_unused,
+ char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
return -1;
@@ -759,26 +776,20 @@ parse_key(struct rte_crypto_sym_key *key __rte_unused,
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- if (strcmp("SHA1", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA1;
+ if (strcmp("MD5_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_MD5_HMAC;
return 0;
} else if (strcmp("SHA1_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
return 0;
- } else if (strcmp("SHA224", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA224;
- return 0;
} else if (strcmp("SHA224_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
return 0;
- } else if (strcmp("SHA256", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
- return 0;
} else if (strcmp("SHA256_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
return 0;
- } else if (strcmp("SHA512", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
+ } else if (strcmp("SHA384_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
return 0;
} else if (strcmp("SHA512_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
@@ -809,7 +820,7 @@ static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
struct option *lgopts, int option_index)
{
- if (strcmp(lgopts[option_index].name, "cdev_type") == 0)
+ if (strcmp(lgopts[option_index].name, "cdev") == 0)
return parse_cryptodev_type(&options->cdev_type, optarg);
else if (strcmp(lgopts[option_index].name, "chain") == 0)
@@ -824,11 +835,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_cipher_op(&options->cipher_xform.cipher.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "cipher_key") == 0)
- return parse_key(&options->cipher_xform.cipher.key,
- sizeof(options->ckey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->ckey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "iv") == 0)
+ options->cipher_xform.cipher.key.data = key.data;
+ options->cipher_xform.cipher.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "iv") == 0)
return parse_key(&options->iv_key, sizeof(options->ivkey_data),
optarg);
@@ -841,11 +858,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_auth_op(&options->auth_xform.auth.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
- return parse_key(&options->auth_xform.auth.key,
- sizeof(options->akey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->akey_data), optarg);
- else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
+ options->auth_xform.auth.key.data = key.data;
+ options->auth_xform.auth.key.length = key.length;
+
+ return retval;
+ } else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
options->sessionless = 1;
return 0;
}
@@ -905,16 +928,16 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
const char *q_arg)
{
char *end = NULL;
- long int n;
+ unsigned long n;
/* parse number string */
- n = strtol(q_arg, &end, 10);
+ n = strtoul(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
n = 0;
if (n >= MAX_TIMER_PERIOD) {
- printf("Warning refresh period specified %ld is greater than "
- "max value %d! using max value",
+ printf("Warning refresh period specified %lu is greater than "
+ "max value %lu! using max value",
n, MAX_TIMER_PERIOD);
n = MAX_TIMER_PERIOD;
}
@@ -934,13 +957,13 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->nb_ports_per_lcore = 1;
options->refresh_period = 10000;
options->single_lcore = 0;
+ options->sessionless = 0;
options->cdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
- options->sessionless = 0;
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
options->cipher_xform.next = NULL;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -949,12 +972,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->ckey_data, sizeof(options->ckey_data));
options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
options->cipher_xform.cipher.key.length = 16;
/* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
options->auth_xform.next = NULL;
options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -966,7 +988,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->akey_data, sizeof(options->akey_data));
options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
options->auth_xform.auth.key.length = 20;
}
@@ -993,39 +1014,6 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
printf("sessionless crypto: %s\n",
options->sessionless ? "enabled" : "disabled");
-#if 0
- options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
-
- /* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
- options->cipher_xform.next = NULL;
-
- options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- generate_random_key(options->ckey_data, sizeof(options->ckey_data));
-
- options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
- options->cipher_xform.cipher.key.length = 16;
-
-
- /* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
- options->auth_xform.next = NULL;
-
- options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
-
- options->auth_xform.auth.add_auth_data_length = 0;
- options->auth_xform.auth.digest_length = 20;
-
- generate_random_key(options->akey_data, sizeof(options->akey_data));
-
- options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
- options->auth_xform.auth.key.length = 20;
-#endif
}
/* Parse the argument given in the command line of the application */
@@ -1039,7 +1027,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
static struct option lgopts[] = {
{ "sessionless", no_argument, 0, 0 },
- { "cdev_type", required_argument, 0, 0 },
+ { "cdev", required_argument, 0, 0 },
{ "chain", required_argument, 0, 0 },
{ "cipher_algo", required_argument, 0, 0 },
@@ -1053,6 +1041,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "iv", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -1370,15 +1359,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
diff --git a/lib/Makefile b/lib/Makefile
index 6840f87..f254dba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index df0c0b8..e0c4fb9 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -44,7 +44,369 @@
extern "C" {
#endif
-#include <rte_crypto_sym.h>
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_crypto_sym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ /**< Undefined operation type */
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ /**< Symmetric operation */
+};
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by a crypto device */
+ RTE_CRYPTO_OP_STATUS_ENQUEUED,
+ /**< Operation is enqueued on device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**<
+ * Symmetric operation failed due to invalid session arguments, or if
+ * in session-less mode, failed to allocate private operation material.
+ */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API, PMDs should check the type parameter to
+ * verify that the operation is a supported function of the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() .
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+ * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+
+ struct rte_mempool *mempool;
+ /**< crypto operation mempool which operation is allocated from */
+
+ phys_addr_t phys_addr;
+ /**< physical address of crypto operation */
+
+ void *opaque_data;
+ /**< Opaque pointer for user data */
+
+ union {
+ struct rte_crypto_sym_op *sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+} __rte_cache_aligned;
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->type = type;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ /** Symmetric operation structure starts after the end of the
+ * rte_crypto_op structure.
+ */
+ op->sym = (struct rte_crypto_sym_op *)(op + 1);
+ op->type = type;
+
+ __rte_crypto_sym_op_reset(op->sym);
+ break;
+ default:
+ break;
+ }
+
+ op->opaque_data = NULL;
+}
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ /**< Crypto op pool type operation. */
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Bulk allocate raw element from mempool and return as crypto operations
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns number of ops allocated
+ * - On failure returns 0, or -EINVAL if the mempool's operation type
+ * does not match the requested type
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ priv = rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != type &&
+ priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+ return -EINVAL;
+
+ if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+ return nb_ops;
+
+ return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = NULL;
+ int retval;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+ if (unlikely(retval != 1))
+ return NULL;
+
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns the number of crypto operations allocated (nb_ops)
+ * - On failure returns 0
+ */
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+ != nb_ops))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_crypto_op_reset(ops[i], type);
+
+ return nb_ops;
+}
+
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+
+ return NULL;
+}
+
+/**
+ * free crypto operation structure
+ * If the operation has been allocated from a rte_mempool, then the operation
+ * will be returned to the mempool.
+ *
+ * @param op crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf which is associated with the crypto operation, the
+ * operation will be allocated in the private data of that
+ * mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /*
+ * check that the mbuf's private data size is sufficient to contain a
+ * crypto operation
+ */
+ if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op))))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym->m_src = m;
+
+ return op;
+}
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ void *priv_data;
+ uint32_t size;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return NULL;
+
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+ nb_xforms);
+}
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param sess cryptodev session
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -1;
+
+ return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index cb2b8f6..831bbf1 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -46,6 +46,8 @@
extern "C" {
#endif
+#include <string.h>
+
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -346,37 +348,24 @@ enum rte_crypto_sym_op_sess_type {
};
-/** Status of crypto operation */
-enum rte_crypto_op_status {
- RTE_CRYPTO_OP_STATUS_SUCCESS,
- /**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
- /**< Authentication verification failed */
- RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
- /**< Operation failed due to invalid arguments in request */
- RTE_CRYPTO_OP_STATUS_ERROR,
- /**< Error handling operation */
-};
+struct rte_cryptodev_sym_session;
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
+ *
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
- * call for performing cipher, hash, or a combined hash and cipher operations.
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
+ * which contains the source data which the crypto operation is to be performed
+ * on.
*/
struct rte_crypto_sym_op {
- enum rte_crypto_sym_op_sess_type type;
- enum rte_crypto_op_status status;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ enum rte_crypto_sym_op_sess_type type;
union {
struct rte_cryptodev_sym_session *session;
@@ -387,7 +376,7 @@ struct rte_crypto_sym_op {
struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -395,7 +384,7 @@ struct rte_crypto_sym_op {
* this location.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -414,17 +403,68 @@ struct rte_crypto_sym_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The field @ref aad
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -432,180 +472,169 @@ struct rte_crypto_sym_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+ * ignored. The field @ref aad should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_sym_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+ * rte_cryptodev_sym_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint16_t length; /**< Length of additional authenticated data */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
+} __rte_cache_aligned;
/**
- * Reset the fields of a crypto operation to their default values.
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
+ memset(op, 0, sizeof(*op));
+
op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
}
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+ void *priv_data, uint8_t nb_xforms)
+{
+ struct rte_crypto_sym_xform *xform;
+
+ sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param sym_op symmetric crypto operation
+ * @param sess cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
struct rte_cryptodev_sym_session *sess)
{
- op->session = sess;
- op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+ sym_op->session = sess;
+ sym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+ return 0;
}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index c7fef6a..4632ca3 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1056,3 +1056,79 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
return NULL;
}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ unsigned elt_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ priv_size;
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->priv_size < priv_size) {
+ mp = NULL;
+ CDEV_LOG_ERR("Mempool %s already exists but with "
+ "incompatible parameters", name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ CDEV_LOG_ERR("Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 732e2b9..aab8cff 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -40,16 +40,14 @@
* Defines RTE Crypto Device APIs for the provisioning of cipher and
* authentication operations.
*
- * @warning
* @b EXPERIMENTAL: this API may change without prior notice
+ *
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include "stddef.h"
-
#include "rte_crypto.h"
#include "rte_dev.h"
@@ -58,15 +56,18 @@ extern "C" {
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
-/**< Intel QAT SYM PMD device name */
+/**< Intel QAT Symmetric Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT SYM PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
+
+extern const char **rte_cyptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -214,8 +215,6 @@ struct rte_cryptodev_config {
/**
* Configure a device.
*
- * EXPERIMENTAL: this API file may change without prior notice
- *
* This function must be invoked first before any other function in the
* API. This function can also be re-invoked when a device is in the
* stopped state.
@@ -411,12 +410,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -489,66 +488,65 @@ struct rte_cryptodev_data {
extern struct rte_cryptodev *rte_cryptodevs;
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operation are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
/**
- * Enqueue a burst of packets for processing on a crypto device.
+ * Enqueue a burst of operations for processing on a crypto device.
*
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
- *
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -556,25 +554,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto devices queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
@@ -620,7 +618,6 @@ extern struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(uint8_t dev_id,
struct rte_crypto_sym_xform *xform);
-
/**
* Free the memory associated with a previously allocated session.
*
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index a46af6f..b682184 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -1,4 +1,4 @@
-DPDK_2.2 {
+DPDK_16.04 {
global:
rte_cryptodevs;
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
-};
+};
\ No newline at end of file
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index e39ad28..52f96c3 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
diff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile
deleted file mode 100644
index acdb449..0000000
--- a/lib/librte_mbuf_offload/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c
deleted file mode 100644
index 5c0c9dd..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
deleted file mode 100644
index 5ce6058..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operations
- * parameter's inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO_SYM
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_sym_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbug offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- (struct rte_pktmbuf_offload_pool_private *)rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success retruns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload strucutre to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO_SYM:
- __rte_crypto_sym_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_sym_xform *
-rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_sym_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
deleted file mode 100644
index 3d3b06a..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/2] cryptodev API changes
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
@ 2016-03-07 13:23 ` De Lara Guarch, Pablo
2016-03-07 13:53 ` Jain, Deepak K
` (7 subsequent siblings)
8 siblings, 0 replies; 62+ messages in thread
From: De Lara Guarch, Pablo @ 2016-03-07 13:23 UTC (permalink / raw)
To: Trahe, Fiona, dev
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Fiona Trahe
> Sent: Monday, March 07, 2016 11:50 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v7 0/2] cryptodev API changes
>
>
> This patch set separates the symmetric crypto operations from generic
> operations and then modifies the cryptodev burst API to accept bursts of
> rte_crypto_op rather than rte_mbufs.
>
> v7:
> - remove trailing spaces introduced in v6
> - rebase against recent config file changes
>
> v6:
> - restore EXPERIMENTAL label to cryptodev. Will handle removal in separate
> thread.
> (email subject was incorrect in v5, so v5 hasn't arrived in patchwork,
> therefore v6 is in-reply-to v4 message id)
>
> V5:
> - updates .map file
> - removes EXPERIMENTAL label from rte_cryptodev.h
>
> V4:
> - Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3
> patcheset.
> - Typo fix in cached attribute on rte_crypto_op structure.
>
> V3:
> - Addresses V2 comments
> - Rebased for head
>
>
> Declan Doherty (1):
> cryptodev: change burst API to be crypto op oriented
>
> Fiona Trahe (2):
> cryptodev: API tidy and changes to support future extensions
> cryptodev: change burst API to be crypto op oriented
>
> MAINTAINERS | 4 -
> app/test/test_cryptodev.c | 894 +++++++++++----------
> app/test/test_cryptodev.h | 9 +-
> app/test/test_cryptodev_perf.c | 270 ++++---
> config/common_base | 7 -
> doc/api/doxy-api-index.md | 1 -
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
> drivers/crypto/qat/qat_crypto.c | 150 ++--
> drivers/crypto/qat/qat_crypto.h | 14 +-
> drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
> examples/l2fwd-crypto/main.c | 300 ++++---
> lib/Makefile | 1 -
> lib/librte_cryptodev/Makefile | 1 +
> lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
> lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
> lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
> lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
> lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
> lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
> lib/librte_mbuf/rte_mbuf.h | 6 -
> lib/librte_mbuf_offload/Makefile | 52 --
> lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
> lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
> .../rte_mbuf_offload_version.map | 7 -
> 26 files changed, 2145 insertions(+), 2016 deletions(-)
> create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
> delete mode 100644 lib/librte_mbuf_offload/Makefile
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
> delete mode 100644
> lib/librte_mbuf_offload/rte_mbuf_offload_version.map
>
> --
> 2.1.0
Series-acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/2] cryptodev API changes
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
2016-03-07 13:23 ` De Lara Guarch, Pablo
@ 2016-03-07 13:53 ` Jain, Deepak K
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
` (6 subsequent siblings)
8 siblings, 0 replies; 62+ messages in thread
From: Jain, Deepak K @ 2016-03-07 13:53 UTC (permalink / raw)
To: Trahe, Fiona, dev
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Fiona Trahe
Sent: Monday, March 7, 2016 11:50 AM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v7 0/2] cryptodev API changes
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
v7:
- remove trailing spaces introduced in v6
- rebase against recent config file changes
v6:
- restore EXPERIMENTAL label to cryptodev. Will handle removal in separate thread.
(email subject was incorrect in v5, so v5 hasn't arrived in patchwork, therefore v6 is in-reply-to v4 message id)
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patcheset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (2):
cryptodev: API tidy and changes to support future extensions
cryptodev: change burst API to be crypto op oriented
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_base | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
26 files changed, 2145 insertions(+), 2016 deletions(-) create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.1.0
Series-acked-by: Deepak Kumar JAIN <deepak.k.jain@intel.com>
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v7 1/2] cryptodev: API tidy and changes to support future extensions
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 1/2] cryptodev: API tidy and changes to support future extensions Fiona Trahe
@ 2016-03-08 14:10 ` Thomas Monjalon
2016-03-10 10:30 ` Trahe, Fiona
0 siblings, 1 reply; 62+ messages in thread
From: Thomas Monjalon @ 2016-03-08 14:10 UTC (permalink / raw)
To: Fiona Trahe; +Cc: dev
Hi,
2016-03-07 11:50, Fiona Trahe:
> This patch splits symmetric specific definitions and
> functions away from the common crypto APIs to facilitate the future extension
> and expansion of the cryptodev framework, in order to allow asymmetric
> crypto operations to be introduced at a later date, as well as to clean the
> logical structure of the public includes. The patch also introduces the _sym
> prefix to symmetric specific structure and functions to improve clarity in
> the API.
It seems you need to update the examples in the same patch, they do not compile
anymore after these changes.
Again, it would be easier to review if you had split the changes to several
patches: one for the sym suffix, others for more tidying.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto op oriented
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto op oriented Fiona Trahe
@ 2016-03-08 14:32 ` Thomas Monjalon
2016-03-09 12:55 ` Trahe, Fiona
0 siblings, 1 reply; 62+ messages in thread
From: Thomas Monjalon @ 2016-03-08 14:32 UTC (permalink / raw)
To: Fiona Trahe; +Cc: dev
2016-03-07 11:50, Fiona Trahe:
> This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
> rte_crypto_op's rather than the current implementation which operates on
> rte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the
> use of crypto operations in general.
>
> The changes also continues the separatation of the symmetric operation parameters
> from the more general operation parameters, this will simplify the integration of
> asymmetric crypto operations in the future.
>
> As well as the changes to the crypto APIs this patch adds functions for managing
> rte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit
> tests and sample application to work with the modified APIs and finally
> removes the now unused rte_mbuf_offload library.
Why not doing several patches?
> -Packet buffer offload - EXPERIMENTAL
> -M: Declan Doherty <declan.doherty@intel.com>
> -F: lib/librte_mbuf_offload/
Removing a library is important. It is not mentioned in the message.
It deserves a separate commit, please.
> @@ -62,8 +61,7 @@ struct crypto_unittest_params {
>
> struct rte_cryptodev_sym_session *sess;
>
> - struct rte_mbuf_offload *ol;
> - struct rte_crypto_sym_op *op;
> + struct rte_crypto_op *op;
Isn't it something which was just renamed in the previous patch?
> -#if HEX_DUMP
> +#ifdef HEX_DUMP
> static void
> hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
A better clean-up would be to remove this ifdef.
If you need a debug function which is not already in EAL, you can
consider adding it.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto op oriented
2016-03-08 14:32 ` Thomas Monjalon
@ 2016-03-09 12:55 ` Trahe, Fiona
2016-03-10 10:28 ` Trahe, Fiona
0 siblings, 1 reply; 62+ messages in thread
From: Trahe, Fiona @ 2016-03-09 12:55 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: dev
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> Sent: Tuesday, March 08, 2016 2:32 PM
> To: Trahe, Fiona
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto
> op oriented
>
> 2016-03-07 11:50, Fiona Trahe:
> > This patch modifies the crypto burst enqueue/dequeue APIs to operate
> > on bursts rte_crypto_op's rather than the current implementation which
> > operates on rte_mbuf bursts, this simplifies the burst processing in
> > the crypto PMDs and the use of crypto operations in general.
> >
> > The changes also continues the separatation of the symmetric operation
> > parameters from the more general operation parameters, this will
> > simplify the integration of asymmetric crypto operations in the future.
> >
> > As well as the changes to the crypto APIs this patch adds functions
> > for managing rte_crypto_op pools to the cryptodev API. It modifies the
> > existing PMDs, unit tests and sample application to work with the
> > modified APIs and finally removes the now unused rte_mbuf_offload library.
>
> Why not doing several patches?
>
> > -Packet buffer offload - EXPERIMENTAL
> > -M: Declan Doherty <declan.doherty@intel.com>
> > -F: lib/librte_mbuf_offload/
>
> Removing a library is important. It is not mentioned in the message.
> It deserves a separate commit, please.
>
> > @@ -62,8 +61,7 @@ struct crypto_unittest_params {
> >
> > struct rte_cryptodev_sym_session *sess;
> >
> > - struct rte_mbuf_offload *ol;
> > - struct rte_crypto_sym_op *op;
> > + struct rte_crypto_op *op;
>
> Isn't it something which was just renamed in the previous patch?
>
> > -#if HEX_DUMP
> > +#ifdef HEX_DUMP
> > static void
> > hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
>
> A better clean-up would be to remove this ifdef.
> If you need a debug function which is not already in EAL, you can consider
> adding it.
>
Hi Thomas,
We're working on this. Will spin the patchset as soon as we can.
Fiona
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto op oriented
2016-03-09 12:55 ` Trahe, Fiona
@ 2016-03-10 10:28 ` Trahe, Fiona
0 siblings, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-03-10 10:28 UTC (permalink / raw)
To: Trahe, Fiona, Thomas Monjalon; +Cc: dev
Hi Thomas,
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Trahe, Fiona
> Sent: Wednesday, March 09, 2016 12:56 PM
> To: Thomas Monjalon
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto
> op oriented
>
>
>
> > -----Original Message-----
> > From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> > Sent: Tuesday, March 08, 2016 2:32 PM
> > To: Trahe, Fiona
> > Cc: dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to
> > be crypto op oriented
> >
> > 2016-03-07 11:50, Fiona Trahe:
> > > This patch modifies the crypto burst enqueue/dequeue APIs to operate
> > > on bursts rte_crypto_op's rather than the current implementation
> > > which operates on rte_mbuf bursts, this simplifies the burst
> > > processing in the crypto PMDs and the use of crypto operations in general.
> > >
> > > The changes also continues the separatation of the symmetric
> > > operation parameters from the more general operation parameters,
> > > this will simplify the integration of asymmetric crypto operations in the
> future.
> > >
> > > As well as the changes to the crypto APIs this patch adds functions
> > > for managing rte_crypto_op pools to the cryptodev API. It modifies
> > > the existing PMDs, unit tests and sample application to work with
> > > the modified APIs and finally removes the now unused rte_mbuf_offload
> library.
> >
> > Why not doing several patches?
> >
We will post v8 patchset today with more granular patches
> > > -Packet buffer offload - EXPERIMENTAL
> > > -M: Declan Doherty <declan.doherty@intel.com>
> > > -F: lib/librte_mbuf_offload/
> >
> > Removing a library is important. It is not mentioned in the message.
> > It deserves a separate commit, please.
> >
ok
> > > @@ -62,8 +61,7 @@ struct crypto_unittest_params {
> > >
> > > struct rte_cryptodev_sym_session *sess;
> > >
> > > - struct rte_mbuf_offload *ol;
> > > - struct rte_crypto_sym_op *op;
> > > + struct rte_crypto_op *op;
> >
> > Isn't it something which was just renamed in the previous patch?
It looks like a double rename, but it's more than that.
In the first patch rte_crypto_op was renamed rte_crypto_sym_op and moved from rte_crypto.h to rte_crypto_sym.h
because it was exclusively for symmetric operations.
In the later patch a more generic rte_crypto_op was introduced in rte_crypto.h which can handle various operation types by having a type and a union. Initially the only type is symmetric, so the union points to an rte_crypto_sym_op, but it is planned to be extended to handle asymmetric operations.
> >
> > > -#if HEX_DUMP
> > > +#ifdef HEX_DUMP
> > > static void
> > > hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
> >
> > A better clean-up would be to remove this ifdef.
> > If you need a debug function which is not already in EAL, you can
> > consider adding it.
> >
Agreed. We will look at adding the debug needed. However this is not likely to make it into the patchset today.
>
> Hi Thomas,
> We're working on this. Will spin the patchset as soon as we can.
> Fiona
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v7 1/2] cryptodev: API tidy and changes to support future extensions
2016-03-08 14:10 ` Thomas Monjalon
@ 2016-03-10 10:30 ` Trahe, Fiona
0 siblings, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-03-10 10:30 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: dev
Hi Thomas,
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> Sent: Tuesday, March 08, 2016 2:11 PM
> To: Trahe, Fiona
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v7 1/2] cryptodev: API tidy and changes to
> support future extensions
>
> Hi,
>
> 2016-03-07 11:50, Fiona Trahe:
> > This patch splits symmetric specific definitions and functions away
> > from the common crypto APIs to facilitate the future extension and
> > expansion of the cryptodev framework, in order to allow asymmetric
> > crypto operations to be introduced at a later date, as well as to
> > clean the logical structure of the public includes. The patch also
> > introduces the _sym prefix to symmetric specific structure and
> > functions to improve clarity in the API.
>
> It seems you need to update the examples in the same patch, they do not
> compile anymore after these changes.
Sorry, fixed.
> Again, it would be easier to review if you had split the changes to several
> patches: one for the sym suffix, others for more tidying.
V8 patchset will be sent shortly with more granular split
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v8 0/5] cryptodev API changes
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
2016-03-07 13:23 ` De Lara Guarch, Pablo
2016-03-07 13:53 ` Jain, Deepak K
@ 2016-03-10 13:42 ` Fiona Trahe
2016-03-10 14:05 ` De Lara Guarch, Pablo
` (7 more replies)
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 1/5] cryptodev: code cleanup Fiona Trahe
` (5 subsequent siblings)
8 siblings, 8 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 13:42 UTC (permalink / raw)
To: dev
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
v8:
- split patchset for easier review
- fix broken /examples/l2fwd-crypto build in intermediate patch
- split removal of rte_mbuf_offload into separate commit
v7:
- remove trailing spaces introduced in v6
- rebase against recent config file changes
v6:
- restore EXPERIMENTAL label to cryptodev. Will handle removal in separate thread.
(email subject was incorrect in v5, so v5 hasn't arrived in patchwork, therefore v6 is in-reply-to v4 message id)
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (2):
cryptodev: change burst API to be crypto op oriented
mbuf_offload: remove library
Fiona Trahe (3):
cryptodev: code cleanup
cryptodev: refactor to partition common from symmetric-specific code
cryptodev: remove unused phys_addr field from key
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 883 +++++++++++----------
app/test/test_cryptodev.h | 5 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_base | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 190 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 30 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 10 +-
drivers/crypto/qat/qat_crypto.c | 160 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 234 +++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 ++++++++-----------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 --------
.../rte_mbuf_offload_version.map | 7 -
26 files changed, 2130 insertions(+), 1967 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v8 1/5] cryptodev: code cleanup
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
` (2 preceding siblings ...)
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
@ 2016-03-10 13:42 ` Fiona Trahe
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 2/5] cryptodev: refactor to partition common from symmetric-specific code Fiona Trahe
` (4 subsequent siblings)
8 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 13:42 UTC (permalink / raw)
To: dev; +Cc: Declan Doherty
- Fixed >80char lines in test file
- Removed unused elements from stats struct
- Removed unused objects in rte_cryptodev_pmd.h
- Renamed variables
- Replaced leading spaces with tabs
- Improved performance results display in test
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
Signed-off-by: Declan Doherty <declan.dohery@intel.com>
---
app/test/test_cryptodev.c | 151 ++++++++++-----------
app/test/test_cryptodev.h | 5 +-
app/test/test_cryptodev_perf.c | 54 +++++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 28 ++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 2 +-
lib/librte_cryptodev/rte_cryptodev.h | 35 +----
lib/librte_cryptodev/rte_cryptodev_pmd.h | 10 +-
8 files changed, 139 insertions(+), 158 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 62f8fb0..1a0f204 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -165,7 +165,8 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -679,76 +680,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -775,7 +777,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -785,8 +786,9 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ /* Create crypto session*/
+ ut_params->sess = rte_cryptodev_session_create(
+ ts_params->valid_devs[0],
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -832,7 +834,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
CIPHER_IV_LENGTH_AES_CBC,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
TEST_ASSERT_BUFFERS_ARE_EQUAL(
rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
@@ -1239,7 +1241,6 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
return TEST_SUCCESS;
}
@@ -1361,7 +1362,6 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
DIGEST_BYTE_LENGTH_SHA512,
"Generated digest data not as expected");
-
return TEST_SUCCESS;
}
@@ -1417,6 +1417,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->auth_xform.auth.key.data = hmac_sha512_key;
ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
+
return TEST_SUCCESS;
}
@@ -1490,8 +1491,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
-
return TEST_SUCCESS;
}
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 034393e..083266a 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -47,8 +47,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 728bcf0..87f0670 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1741,7 +1741,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < num_to_submit ; b++) {
tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ (const char *)data_params[0].expected.ciphertext,
data_params[0].length, 0);
TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
@@ -1814,7 +1814,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1823,11 +1824,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1861,10 +1866,10 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
+ double throughput, mmps;
struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -1904,7 +1909,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+ "Mrps\tThoughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1957,15 +1962,19 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, tx_mbufs,
+ ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
+ < DEFAULT_BURST_SIZE) ?
+ DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
+ DEFAULT_BURST_SIZE);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1973,10 +1982,13 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num, 0,
rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1984,11 +1996,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
@@ -2002,6 +2017,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
rte_pktmbuf_free(tx_mbufs[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index c7d884c..f2afdb6 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -377,17 +377,21 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ sizeof(get_digest_byte_length(job->hash_alg)));
+
} else {
job->auth_tag_output = c_op->digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library current only support returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
@@ -489,7 +493,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (m)
rte_ring_enqueue(qp->processed_pkts, (void *)m);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -513,19 +517,19 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
for (i = 0; i < nb_bufs; i++) {
ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ qp->stats.enqueue_err_count++;
goto flush_jobs;
}
sess = get_session(qp, &ol->op.crypto);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ qp->stats.enqueue_err_count++;
goto flush_jobs;
}
job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ qp->stats.enqueue_err_count++;
goto flush_jobs;
}
@@ -543,7 +547,7 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -553,7 +557,7 @@ flush_jobs:
*/
job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ qp->stats.enqueued_count += handle_completed_jobs(qp, job);
return i;
}
@@ -568,7 +572,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair,
nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
(void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index e43b898..76a85ff 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..304c85c 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..0ccd1b8 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -130,17 +130,6 @@ struct rte_cryptodev_qp_conf {
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
enum rte_cryptodev_event_type event, void *cb_arg);
-#ifdef RTE_CRYPTODEV_PERF
-/**
- * Crypto Device performance counter statistics structure. This structure is
- * used for RDTSC counters for measuring crypto operations.
- */
-struct rte_cryptodev_perf_stats {
- uint64_t t_accumlated; /**< Accumulated time processing operation */
- uint64_t t_min; /**< Max time */
- uint64_t t_max; /**< Min time */
-};
-#endif
/** Crypto Device statistics */
struct rte_cryptodev_stats {
@@ -153,29 +142,8 @@ struct rte_cryptodev_stats {
/**< Total error count on operations enqueued */
uint64_t dequeue_err_count;
/**< Total error count on operations dequeued */
+};
-#ifdef RTE_CRYPTODEV_DETAILED_STATS
- struct {
- uint64_t encrypt_ops; /**< Count of encrypt operations */
- uint64_t encrypt_bytes; /**< Number of bytes encrypted */
-
- uint64_t decrypt_ops; /**< Count of decrypt operations */
- uint64_t decrypt_bytes; /**< Number of bytes decrypted */
- } cipher; /**< Cipher operations stats */
-
- struct {
- uint64_t generate_ops; /**< Count of generate operations */
- uint64_t bytes_hashed; /**< Number of bytes hashed */
-
- uint64_t verify_ops; /**< Count of verify operations */
- uint64_t bytes_verified;/**< Number of bytes verified */
- } hash; /**< Hash operations stats */
-#endif
-
-#ifdef RTE_CRYPTODEV_PERF
- struct rte_cryptodev_perf_stats op_perf; /**< Operations stats */
-#endif
-} __rte_cache_aligned;
/**
* Create a virtual crypto device
@@ -632,7 +600,6 @@ extern struct rte_cryptodev_session *
rte_cryptodev_session_create(uint8_t dev_id,
struct rte_crypto_xform *xform);
-
/**
* Free the memory associated with a previously allocated session.
*
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 8270afa..a16d109 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -56,11 +56,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-struct rte_cryptodev_stats;
-struct rte_cryptodev_info;
-struct rte_cryptodev_qp_conf;
-
-enum rte_cryptodev_event_type;
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define RTE_PMD_DEBUG_TRACE(...) \
@@ -80,7 +75,6 @@ struct rte_cryptodev_session {
};
struct rte_cryptodev_driver;
-struct rte_cryptodev;
/**
* Initialisation function of a crypto driver invoked for each matching
@@ -427,9 +421,9 @@ struct rte_cryptodev_ops {
cryptodev_info_get_t dev_infos_get; /**< Get device info. */
cryptodev_stats_get_t stats_get;
- /**< Get generic device statistics. */
+ /**< Get device statistics. */
cryptodev_stats_reset_t stats_reset;
- /**< Reset generic device statistics. */
+ /**< Reset device statistics. */
cryptodev_queue_pair_setup_t queue_pair_setup;
/**< Set up a device queue pair. */
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v8 2/5] cryptodev: refactor to partition common from symmetric-specific code
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
` (3 preceding siblings ...)
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 1/5] cryptodev: code cleanup Fiona Trahe
@ 2016-03-10 13:42 ` Fiona Trahe
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 3/5] cryptodev: remove unused phys_addr field from key Fiona Trahe
` (3 subsequent siblings)
8 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 13:42 UTC (permalink / raw)
To: dev
This patch splits symmetric specific definitions and
functions away from the common crypto APIs to facilitate the future extension
and expansion of the cryptodev framework, in order to allow asymmetric
crypto operations to be introduced at a later date, as well as to clean the
logical structure of the public includes. The patch also introduces the _sym
prefix to symmetric specific structure and functions to improve clarity in
the API.
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
app/test/test_cryptodev.c | 161 +++---
app/test/test_cryptodev_perf.c | 42 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 44 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 6 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 51 +-
drivers/crypto/qat/qat_crypto.h | 10 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 31 +-
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 547 +------------------
lib/librte_cryptodev/rte_crypto_sym.h | 598 +++++++++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 39 +-
lib/librte_cryptodev/rte_cryptodev.h | 46 +-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 22 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 6 +-
lib/librte_mbuf_offload/rte_mbuf_offload.h | 22 +-
17 files changed, 867 insertions(+), 771 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 1a0f204..5ced183 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,13 +57,13 @@ struct crypto_testsuite_params {
};
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
struct rte_mbuf_offload *ol;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -78,7 +78,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_param);
@@ -166,7 +166,7 @@ testsuite_setup(void)
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -221,7 +221,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
@@ -276,7 +276,7 @@ ut_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
ts_params->conf.session_mp.nb_objs =
- (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+ (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
DEFAULT_NUM_OPS_INFLIGHT :
DEFAULT_NUM_OPS_INFLIGHT;
@@ -320,7 +320,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
ut_params->sess);
ut_params->sess = NULL;
}
@@ -465,7 +465,7 @@ test_queue_pair_descriptor_setup(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf), "Failed to configure cryptodev %u",
@@ -768,7 +768,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -777,7 +777,8 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -787,21 +788,21 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create crypto session*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -864,18 +865,18 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+ TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
ut_params->ol, 2),
"failed to allocate space for crypto transforms");
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -884,7 +885,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -960,7 +961,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA1);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -969,7 +970,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -979,13 +980,14 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -993,7 +995,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1068,7 +1070,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1077,7 +1079,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1087,13 +1089,14 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1101,7 +1104,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1170,7 +1173,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA256);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1179,7 +1182,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1189,13 +1192,14 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1203,7 +1207,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1283,7 +1287,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1292,7 +1296,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1302,15 +1306,16 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1318,7 +1323,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1371,7 +1376,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
@@ -1386,8 +1391,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
"Failed to create session params");
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
@@ -1400,7 +1406,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
{
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1409,7 +1415,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1423,7 +1429,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params)
{
@@ -1443,7 +1449,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1451,7 +1457,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1521,7 +1527,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1530,7 +1536,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1540,13 +1546,14 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1554,7 +1561,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)
rte_pktmbuf_prepend(ut_params->ibuf,
@@ -1614,7 +1621,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_AES_XCBC);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1623,7 +1630,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1633,13 +1640,14 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1647,7 +1655,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
@@ -1751,7 +1759,7 @@ test_multi_session(void)
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_cryptodev_info dev_info;
- struct rte_cryptodev_session **sessions;
+ struct rte_cryptodev_sym_session **sessions;
uint16_t i;
@@ -1760,12 +1768,13 @@ test_multi_session(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
- dev_info.max_nb_sessions) + 1, 0);
+ sessions = rte_malloc(NULL,
+ (sizeof(struct rte_cryptodev_sym_session *) *
+ dev_info.sym.max_nb_sessions) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.max_nb_sessions; i++) {
- sessions[i] = rte_cryptodev_session_create(
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(sessions[i],
@@ -1780,13 +1789,13 @@ test_multi_session(void)
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.max_nb_sessions; i++)
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
sessions[i]);
rte_free(sessions);
@@ -1805,7 +1814,7 @@ test_not_in_place_crypto(void)
/* Create multiple crypto sessions*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0], &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1827,7 +1836,7 @@ test_not_in_place_crypto(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1835,7 +1844,7 @@ test_not_in_place_crypto(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1961,7 +1970,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static struct test_command cryptodev_qat_cmd = {
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 87f0670..b0c8abf 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -63,12 +63,12 @@ struct crypto_testsuite_params {
#define MAX_NUM_OF_OPS_PER_UT (128)
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
@@ -127,7 +127,7 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -179,7 +179,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
@@ -252,7 +252,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess)
- rte_cryptodev_session_free(ts_params->dev_id,
+ rte_cryptodev_sym_session_free(ts_params->dev_id,
ut_params->sess);
/* free crypto operation structure */
@@ -1713,7 +1713,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1723,7 +1723,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1733,7 +1733,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1753,12 +1753,12 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
DIGEST_BYTE_LENGTH_SHA256);
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+ ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
@@ -1881,7 +1881,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1890,7 +1890,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1900,7 +1900,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1933,12 +1933,12 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -2060,7 +2060,7 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f2afdb6..f39ebd5 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -109,7 +109,7 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
@@ -119,12 +119,12 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
@@ -134,11 +134,11 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
@@ -196,11 +196,11 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
@@ -259,10 +259,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,11 +296,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
@@ -313,7 +313,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
return NULL;
sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
sess, crypto_op->xform) != 0)) {
@@ -339,7 +339,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
@@ -436,14 +437,14 @@ static struct rte_mbuf *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -463,7 +464,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
@@ -515,7 +516,8 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
qp->stats.enqueue_err_count++;
goto flush_jobs;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 76a85ff..d56de12 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -108,7 +108,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->dev_type = dev->dev_type;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->max_nb_sessions = internals->max_nb_sessions;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
}
}
@@ -243,7 +243,7 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
/** Configure a aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform, void *sess)
{
struct aesni_mb_private *internals = dev->data->dev_private;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 304c85c..0aed177 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -218,7 +218,7 @@ struct aesni_mb_session {
extern int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform);
+ const struct rte_crypto_sym_xform *xform);
/** device specific operations function pointer structure */
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 828756b..e7b9027 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,37 +89,37 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
}
static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
if (xform->next == NULL)
return -1;
/* Cipher Only */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
/* Authentication Only */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
/* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
/* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
return -1;
}
static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return &xform->auth;
xform = xform->next;
@@ -129,10 +129,10 @@ qat_get_auth_xform(struct rte_crypto_xform *xform)
}
static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return &xform->cipher;
xform = xform->next;
@@ -144,7 +144,7 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform)
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -261,7 +261,7 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
auth_xform->digest_length))
goto error_out;
- return (struct rte_cryptodev_session *)session;
+ return (struct rte_crypto_sym_session *)session;
error_out:
rte_mempool_put(internals->sess_mp, session);
@@ -275,7 +275,7 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
register struct qat_queue *queue;
@@ -327,7 +327,8 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct rte_mbuf_offload *ol;
struct qat_queue *queue;
@@ -343,12 +344,13 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_pkts) {
rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+ ol = rte_pktmbuf_offload_get(rx_mbuf,
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ ol->op.crypto.status =
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
@@ -384,20 +386,21 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
"to (%p) mbuf.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests mbuf (%p) is sessionless.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(ol->op.crypto.session->type
+ != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
@@ -520,8 +523,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
- info->max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..e9f71fe 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,16 +109,18 @@ qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
extern void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform, void *session_private);
extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..85700fc 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,11 +89,11 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->pci_dev->addr.devid,
cryptodev->pci_dev->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index a950b74..ee519e7 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,13 +124,13 @@ struct l2fwd_crypto_options {
enum l2fwd_crypto_xform_chain xform_chain;
- struct rte_crypto_xform cipher_xform;
+ struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
struct rte_crypto_key iv_key;
uint8_t ivkey_data[16];
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform auth_xform;
uint8_t akey_data[128];
};
@@ -141,9 +141,8 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
-
struct rte_crypto_key iv_key;
- struct rte_cryptodev_session *session;
+ struct rte_cryptodev_sym_session *session;
};
/** lcore configuration */
@@ -372,7 +371,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
/* Append space for digest to end of packet */
ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -474,11 +473,11 @@ generate_random_key(uint8_t *key, unsigned length)
key[i] = rand() % 0xff;
}
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
uint8_t cdev_id)
{
- struct rte_crypto_xform *first_xform;
+ struct rte_crypto_sym_xform *first_xform;
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
@@ -489,7 +488,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options,
}
/* Setup Cipher Parameters */
- return rte_cryptodev_session_create(cdev_id, first_xform);
+ return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}
static void
@@ -610,7 +609,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
m = pkts_burst[j];
ol = rte_pktmbuf_offload_alloc(
l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
/*
* If we can't allocate a offload, then drop
* the rest of the burst and dequeue and
@@ -689,7 +688,7 @@ parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg)
*type = RTE_CRYPTODEV_AESNI_MB_PMD;
return 0;
} else if (strcmp("QAT", optarg) == 0) {
- *type = RTE_CRYPTODEV_QAT_PMD;
+ *type = RTE_CRYPTODEV_QAT_SYM_PMD;
return 0;
}
@@ -937,7 +936,7 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
options->cipher_xform.next = NULL;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -946,12 +945,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->ckey_data, sizeof(options->ckey_data));
options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
options->cipher_xform.cipher.key.length = 16;
/* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
options->auth_xform.next = NULL;
options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -963,7 +961,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->akey_data, sizeof(options->akey_data));
options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
options->auth_xform.auth.key.length = 20;
}
@@ -982,7 +979,7 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
switch (options->cdev_type) {
case RTE_CRYPTODEV_AESNI_MB_PMD:
printf("cryptodev type: AES-NI MB PMD\n"); break;
- case RTE_CRYPTODEV_QAT_PMD:
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
printf("cryptodev type: QAT PMD\n"); break;
default:
break;
@@ -1179,7 +1176,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports)
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
int retval;
- if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+ if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
if (rte_cryptodev_count() < nb_ports)
return -1;
} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 81fa3fc..0d59229 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -45,6 +45,7 @@ SRCS-y += rte_cryptodev.c
# export include files
SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..620c00b 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,313 +36,14 @@
/**
* @file rte_crypto.h
*
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
*
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
- RTE_CRYPTO_CIPHER_NULL = 1,
- /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
- RTE_CRYPTO_CIPHER_3DES_CBC,
- /**< Triple DES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_3DES_CTR,
- /**< Triple DES algorithm in CTR mode */
- RTE_CRYPTO_CIPHER_3DES_ECB,
- /**< Triple DES algorithm in ECB mode */
-
- RTE_CRYPTO_CIPHER_AES_CBC,
- /**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
- RTE_CRYPTO_CIPHER_AES_CTR,
- /**< AES algorithm in Counter mode */
- RTE_CRYPTO_CIPHER_AES_ECB,
- /**< AES algorithm in ECB mode */
- RTE_CRYPTO_CIPHER_AES_F8,
- /**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* element of the
- * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_setup_data* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation.
- */
- RTE_CRYPTO_CIPHER_AES_XTS,
- /**< AES algorithm in XTS mode */
-
- RTE_CRYPTO_CIPHER_ARC4,
- /**< (A)RC4 cipher algorithm */
-
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- /**< Kasumi algorithm in F8 mode */
-
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- /**< SNOW3G algorithm in UEA2 mode */
-
- RTE_CRYPTO_CIPHER_ZUC_EEA3
- /**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- /**< Encrypt cipher operation */
- RTE_CRYPTO_CIPHER_OP_DECRYPT
- /**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- * use to create a session.
- */
-struct rte_crypto_cipher_xform {
- enum rte_crypto_cipher_operation op;
- /**< This parameter determines if the cipher operation is an encrypt or
- * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
- * only encrypt operations are valid.
- */
- enum rte_crypto_cipher_algorithm algo;
- /**< Cipher algorithm */
-
- struct rte_crypto_key key;
- /**< Cipher key
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
- * point to a concatenation of the AES encryption key followed by a
- * keymask. As per RFC3711, the keymask should be padded with trailing
- * bytes to match the length of the encryption key used.
- *
- * For AES-XTS mode of operation, two keys must be provided and
- * key.data must point to the two keys concatenated together (Key1 ||
- * Key2). The cipher key length will contain the total size of both
- * keys.
- *
- * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
- * 192 bits (24 bytes) or 256 bits (32 bytes).
- *
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
- * should be set to the combined length of the encryption key and the
- * keymask. Since the keymask and the encryption key are the same size,
- * key.length should be set to 2 x the AES encryption key length.
- *
- * For the AES-XTS mode of operation:
- * - Two keys must be provided and key.length refers to total length of
- * the two keys.
- * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size.
- **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
- RTE_CRYPTO_AUTH_NULL = 1,
- /**< NULL hash algorithm. */
-
- RTE_CRYPTO_AUTH_AES_CBC_MAC,
- /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_CMAC,
- /**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- /**< AES XCBC algorithm. */
-
- RTE_CRYPTO_AUTH_KASUMI_F9,
- /**< Kasumi algorithm in F9 mode. */
-
- RTE_CRYPTO_AUTH_MD5,
- /**< MD5 algorithm */
- RTE_CRYPTO_AUTH_MD5_HMAC,
- /**< HMAC using MD5 algorithm */
-
- RTE_CRYPTO_AUTH_SHA1,
- /**< 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA1_HMAC,
- /**< HMAC using 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224,
- /**< 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224_HMAC,
- /**< HMAC using 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256,
- /**< 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256_HMAC,
- /**< HMAC using 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384,
- /**< 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384_HMAC,
- /**< HMAC using 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512,
- /**< 512 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512_HMAC,
- /**< HMAC using 512 bit SHA algorithm. */
-
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- /**< SNOW3G algorithm in UIA2 mode. */
-
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- /**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
- RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
- RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
- enum rte_crypto_auth_operation op;
- /**< Authentication operation type */
- enum rte_crypto_auth_algorithm algo;
- /**< Authentication algorithm selection */
-
- struct rte_crypto_key key; /**< Authentication key data.
- * The authentication key length MUST be less than or equal to the
- * block size of the algorithm. It is the callers responsibility to
- * ensure that the key length is compliant with the standard being used
- * (for example RFC 2104, FIPS 198a).
- */
-
- uint32_t digest_length;
- /**< Length of the digest to be returned. If the verify option is set,
- * this specifies the length of the digest to be compared for the
- * session.
- *
- * If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_session_create* or by the
- * *rte_cryptodev_enqueue_burst* if using session-less APIs.
- */
-
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in the message length to hash field of
- * the rte_crypto_op_data structure.
- */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
- RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
- RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
- struct rte_crypto_xform *next; /**< next xform in chain */
-
- enum rte_crypto_xform_type type; /**< xform type */
- union {
- struct rte_crypto_auth_xform auth;
- /**< Authentication / hash xform */
- struct rte_crypto_cipher_xform cipher;
- /**< Cipher xform */
- };
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
- RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
/** Status of crypto operation */
enum rte_crypto_op_status {
RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -359,249 +60,7 @@ enum rte_crypto_op_status {
/**< Error handling operation */
};
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
- enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
-
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
-
- union {
- struct rte_cryptodev_session *session;
- /**< Handle for the initialised session context */
- struct rte_crypto_xform *xform;
- /**< Session-less API crypto operation parameters */
- };
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- */
- } to_cipher; /**< Data offsets and length for ciphering */
-
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field specifies the start
- * of the AAD data in the source buffer.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field specifies the length of
- * the AAD data in the source buffer.
- */
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
-
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param op The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
- op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
- struct rte_cryptodev_session *sess)
-{
- op->session = sess;
- op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 0000000..de6c701
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,598 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CCM,
+ /**< AES algorithm in CCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation
+ */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_GCM,
+ /**< AES algorithm in GCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< Kasumi algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ /**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/** Crypto key structure */
+struct rte_crypto_key {
+ uint8_t *data; /**< pointer to key data */
+ phys_addr_t phys_addr;
+ size_t length; /**< key length in bytes */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * use to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct rte_crypto_key key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * For AES-XTS mode of operation, two keys must be provided and
+ * key.data must point to the two keys concatenated together (Key1 ||
+ * Key2). The cipher key length will contain the total size of both
+ * keys.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the CCM mode of operation, the only supported key length is 128
+ * bits (16 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ **/
+};
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CCM,
+ /**< AES algorithm in CCM mode. This is an authenticated cipher. When
+ * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+ * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+ * set up the related rte_crypto_cipher_xform structure in the
+ * session context or the corresponding parameter in the crypto
+ * operation data structures op_params parameter MUST be set for a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GCM,
+ /**< AES algorithm in GCM mode. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< Kasumi algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 160 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 160 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3
+ /**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transforms. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct rte_crypto_key key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the callers responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ uint32_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated. If the value is greater than the
+ * maximum length allowed by the hash then an error will be generated
+ * by *rte_cryptodev_sym_session_create* or by the
+ * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ */
+
+ uint32_t add_auth_data_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * The maximum permitted value is 240 bytes, unless otherwise specified
+ * below.
+ *
+ * This field must be specified when the hash algorithm is one of the
+ * following:
+ *
+ * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+ * length of the IV (which should be 16).
+ *
+ * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+ * the length of the Additional Authenticated Data (called A, in NIST
+ * SP800-38D).
+ *
+ * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+ * the length of the associated data (called A, in NIST SP800-38C).
+ * Note that this does NOT include the length of any padding, or the
+ * 18 bytes reserved at the start of the above field to store the
+ * block B0 and the encoded length. The maximum permitted value in
+ * this case is 222 bytes.
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+ * this field is not used and should be set to 0. Instead the length
+ * of the AAD data is specified in the message length to hash field of
+ * the rte_crypto_sym_op structure.
+ */
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required, multiple transforms
+ * can be chained together to specify a chain transforms such as authentication
+ * then cipher, or cipher then authentication. Each transform structure can
+ * hold a single transform, the type field is used to specify which transform
+ * is contained within the union
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type;
+ /**< xform type */
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ };
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+ RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
+ * call for performing cipher, hash, or a combined hash and cipher operations.
+ */
+struct rte_crypto_sym_op {
+ enum rte_crypto_sym_op_sess_type type;
+ enum rte_crypto_op_status status;
+
+ struct {
+ struct rte_mbuf *m; /**< Destination mbuf */
+ uint8_t offset; /**< Data offset */
+ } dst;
+
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing, specified
+ * as number of bytes from start of data in the source
+ * buffer. The result of the cipher operation will be
+ * written back into the output buffer starting at
+ * this location.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ * if a block cipher is being used. This is also the
+ * same as the result length.
+ *
+ * @note
+ * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+ * this value should not include the length of the
+ * padding or the length of the MAC; the driver will
+ * compute the actual number of bytes over which the
+ * encryption will occur, which will include these
+ * values.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+ * field should be set to 0.
+ */
+ } to_cipher; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+ * mode of operation, this field specifies the start
+ * of the AAD data in the source buffer.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+ * of operation, this field specifies the length of
+ * the AAD data in the source buffer.
+ */
+ } to_hash; /**< Data offsets and length for authentication */
+ } data; /**< Details of data to be operated on */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+ * Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length is 96
+ * bits) or J0 (for other sizes), where J0 is as defined by
+ * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+ * needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the nonce
+ * should be written starting at &iv[1] (to allow space for the
+ * implementation to write in the flags in the first byte).
+ * Note that a full 16 bytes should be allocated, even though
+ * the length field will have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+ * 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD be
+ * 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ size_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the length of the
+ * IV (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length of the
+ * counter (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+ * which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce, which can
+ * be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result should be
+ * inserted (in the case of digest generation) or where the
+ * purported digest exists (in the case of digest
+ * verification).
+ *
+ * At session creation time, the client specified the digest
+ * result length with the digest_length member of the @ref
+ * rte_crypto_auth_xform structure. For physical crypto
+ * devices the caller must allocate at least digest_length of
+ * physically contiguous memory at this location.
+ *
+ * For digest generation, the digest result will overwrite
+ * any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is understood
+ * to be in the destination buffer for digest generation, and
+ * in the source buffer for digest verification. The location
+ * of the digest result in this case is immediately following
+ * the region over which the digest is computed.
+ */
+ phys_addr_t phys_addr; /**< Physical address of digest */
+ uint32_t length; /**< Length of digest */
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD) needed for
+ * authenticated cipher mechanisms (CCM and GCM), and to the IV
+ * for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is set up
+ * for the session in the @ref rte_crypto_auth_xform structure
+ * as part of the @ref rte_cryptodev_sym_session_create function
+ * call. This length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+ * caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset of one
+ * byte into the array, leaving room for the implementation
+ * to write in the flags to the first byte.
+ *
+ * - the additional authentication data itself should be
+ * written starting at an offset of 18 bytes into the array,
+ * leaving room for the length encoding in the first two
+ * bytes of the second block.
+ *
+ * - the array should be big enough to hold the above fields,
+ * plus any padding to round this up to the nearest multiple
+ * of the block size (16 bytes). Padding will be added by
+ * the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set to 0.
+ * Instead the AAD data should be placed in the source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint32_t length; /**< Length of additional authenticated data */
+ } additional_auth;
+ /**< Additional authentication parameters */
+
+ struct rte_mempool *pool;
+ /**< mempool used to allocate crypto op */
+
+ void *user_data;
+ /**< opaque pointer for user data */
+};
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+ op->dst.m = NULL;
+ op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ op->session = sess;
+ op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 2838852..c7fef6a 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -572,8 +572,8 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
@@ -604,8 +604,10 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
- config->session_mp.cache_size, config->socket_id);
+ return rte_cryptodev_sym_session_pool_create(dev,
+ config->session_mp.nb_objs,
+ config->session_mp.cache_size,
+ config->socket_id);
}
@@ -911,12 +913,12 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
void *opaque_arg,
void *_sess,
__rte_unused unsigned i)
{
- struct rte_cryptodev_session *sess = _sess;
+ struct rte_cryptodev_sym_session *sess = _sess;
struct rte_cryptodev *dev = opaque_arg;
memset(sess, 0, mp->elt_size);
@@ -930,8 +932,8 @@ rte_crypto_session_init(struct rte_mempool *mp,
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned priv_sess_size;
@@ -951,7 +953,7 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return -ENOMEM;
}
- unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+ unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
priv_sess_size;
dev->data->session_pool = rte_mempool_lookup(mp_name);
@@ -975,7 +977,8 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
0, /* private data size */
NULL, /* obj initialization constructor */
NULL, /* obj initialization constructor arg */
- rte_crypto_session_init, /* obj constructor */
+ rte_cryptodev_sym_session_init,
+ /**< obj constructor*/
dev, /* obj constructor arg */
socket_id, /* socket id */
0); /* flags */
@@ -990,11 +993,12 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return 0;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
void *_sess;
if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
@@ -1010,7 +1014,7 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return NULL;
}
- sess = (struct rte_cryptodev_session *)_sess;
+ sess = (struct rte_cryptodev_sym_session *)_sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1026,8 +1030,9 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return sess;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 0ccd1b8..f4b38c1 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,14 +57,14 @@ extern "C" {
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
+/**< Intel QAT Symmetric Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
/* Logging Macros */
@@ -99,8 +99,11 @@ struct rte_cryptodev_info {
unsigned max_nb_queue_pairs;
/**< Maximum number of queues pairs supported by device. */
- unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device. */
+ } sym;
};
#define RTE_CRYPTODEV_DETACHED (0)
@@ -575,6 +578,23 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
}
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+ struct {
+ uint8_t dev_id;
+ /**< Device Id */
+ enum rte_cryptodev_type type;
/**< Crypto Device type session created on */
+ struct rte_mempool *mp;
+ /**< Mempool session allocated from */
+ } __rte_aligned(8);
+ /**< Public symmetric session details */
+
+ char _private[0];
+ /**< Private session material */
+};
+
+
/**
* Initialise a session for symmetric cryptographic operations.
*
@@ -596,24 +616,24 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
* @return
* Pointer to the created session or NULL
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
- struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform);
/**
* Free the memory associated with a previously allocated session.
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
- * *rte_cryptodev_session_create*.
+ * *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
- struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *session);
#ifdef __cplusplus
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index a16d109..7d049ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -358,7 +358,7 @@ typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
* - On success returns a pointer to a rte_mempool
* - On failure returns a NULL pointer
*/
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
struct rte_cryptodev *dev, unsigned nb_objs,
unsigned obj_cache_size, int socket_id);
@@ -372,7 +372,7 @@ typedef int (*cryptodev_create_session_pool_t)(
* - On success returns the size of the session structure for device
* - On failure returns 0
*/
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
@@ -386,7 +386,7 @@ typedef unsigned (*cryptodev_get_session_private_size_t)(
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
void *session_private);
/**
@@ -400,14 +400,14 @@ typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
/**
* Free Crypto session.
* @param session Cryptodev session structure to free
*/
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
@@ -436,13 +436,13 @@ struct rte_cryptodev_ops {
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return private session. */
- cryptodev_initialize_session_t session_initialize;
+ cryptodev_sym_initialize_session_t session_initialize;
/**< Initialization function for private session data */
- cryptodev_configure_session_t session_configure;
+ cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
- cryptodev_free_session_t session_clear;
+ cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
};
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..a46af6f 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -16,8 +16,8 @@ DPDK_2.2 {
rte_cryptodev_pmd_driver_register;
rte_cryptodev_pmd_release_device;
rte_cryptodev_pmd_virtual_dev_init;
- rte_cryptodev_session_create;
- rte_cryptodev_session_free;
+ rte_cryptodev_sym_session_create;
+ rte_cryptodev_sym_session_free;
rte_cryptodev_socket_id;
rte_cryptodev_start;
rte_cryptodev_stats_get;
@@ -29,4 +29,4 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_stop;
local: *;
-};
\ No newline at end of file
+};
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
index 77993b6..5ce6058 100644
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,7 +67,7 @@ extern "C" {
enum rte_mbuf_ol_op_type {
RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
/**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
+ RTE_PKTMBUF_OL_CRYPTO_SYM
/**< Crypto offload operation */
};
@@ -84,7 +84,7 @@ struct rte_mbuf_offload {
enum rte_mbuf_ol_op_type type; /**< offload type */
union {
- struct rte_crypto_op crypto; /**< Crypto operation */
+ struct rte_crypto_sym_op crypto; /**< Crypto operation */
} op;
};
@@ -194,8 +194,8 @@ __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
ol->type = type;
switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
+ case RTE_PKTMBUF_OL_CRYPTO_SYM:
+ __rte_crypto_sym_op_reset(&ol->op.crypto); break;
default:
break;
}
@@ -278,24 +278,24 @@ __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
* - On success returns pointer to first crypto xform in crypto operations chain
* - On failure returns NULL
*/
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
unsigned nb_xforms)
{
- struct rte_crypto_xform *xform;
+ struct rte_crypto_sym_xform *xform;
void *priv_data;
uint16_t size;
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
if (priv_data == NULL)
return NULL;
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+ ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
} while (xform);
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v8 3/5] cryptodev: remove unused phys_addr field from key
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
` (4 preceding siblings ...)
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 2/5] cryptodev: refactor to partition common from symmetric-specific code Fiona Trahe
@ 2016-03-10 13:42 ` Fiona Trahe
2016-03-10 13:43 ` [dpdk-dev] [PATCH v8 4/5] cryptodev: change burst API to be crypto op oriented Fiona Trahe
` (2 subsequent siblings)
8 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 13:42 UTC (permalink / raw)
To: dev
Remove unused phys_addr field from key in crypto_xform, simplify the struct
and fix knock-on impacts in the l2fwd-crypto app
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
examples/l2fwd-crypto/main.c | 42 ++++++++++++++++++++++++++---------
lib/librte_cryptodev/rte_crypto_sym.h | 16 ++++++-------
2 files changed, 39 insertions(+), 19 deletions(-)
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index ee519e7..9b6b7ef 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -112,6 +112,12 @@ enum l2fwd_crypto_xform_chain {
L2FWD_CRYPTO_HASH_CIPHER
};
+struct l2fwd_key {
+ uint8_t *data;
+ uint32_t length;
+ phys_addr_t phys_addr;
+};
+
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -127,7 +133,7 @@ struct l2fwd_crypto_options {
struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_key iv_key;
+ struct l2fwd_key iv_key;
uint8_t ivkey_data[16];
struct rte_crypto_sym_xform auth_xform;
@@ -141,7 +147,7 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct rte_crypto_key iv_key;
+ struct l2fwd_key iv_key;
struct rte_cryptodev_sym_session *session;
};
@@ -744,7 +750,7 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_key *key __rte_unused,
+parse_key(struct l2fwd_key *key __rte_unused,
unsigned length __rte_unused, char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
@@ -820,11 +826,18 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_cipher_op(&options->cipher_xform.cipher.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "cipher_key") == 0)
- return parse_key(&options->cipher_xform.cipher.key,
- sizeof(options->ckey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->ckey_data), optarg);
+
+ options->cipher_xform.cipher.key.data = key.data;
+ options->cipher_xform.cipher.key.length = key.length;
- else if (strcmp(lgopts[option_index].name, "iv") == 0)
+ return retval;
+
+ } else if (strcmp(lgopts[option_index].name, "iv") == 0)
return parse_key(&options->iv_key, sizeof(options->ivkey_data),
optarg);
@@ -837,11 +850,18 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_auth_op(&options->auth_xform.auth.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
- return parse_key(&options->auth_xform.auth.key,
- sizeof(options->akey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->akey_data), optarg);
+
+ options->auth_xform.auth.key.data = key.data;
+ options->auth_xform.auth.key.length = key.length;
+
+ return retval;
- else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
+ } else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
options->sessionless = 1;
return 0;
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index de6c701..270510e 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -111,12 +111,6 @@ enum rte_crypto_cipher_operation {
/**< Decrypt cipher operation */
};
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
/**
* Symmetric Cipher Setup Data.
@@ -133,7 +127,10 @@ struct rte_crypto_cipher_xform {
enum rte_crypto_cipher_algorithm algo;
/**< Cipher algorithm */
- struct rte_crypto_key key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
/**< Cipher key
*
* For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
@@ -257,7 +254,10 @@ struct rte_crypto_auth_xform {
enum rte_crypto_auth_algorithm algo;
/**< Authentication algorithm selection */
- struct rte_crypto_key key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
/**< Authentication key data.
* The authentication key length MUST be less than or equal to the
* block size of the algorithm. It is the callers responsibility to
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v8 4/5] cryptodev: change burst API to be crypto op oriented
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
` (5 preceding siblings ...)
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 3/5] cryptodev: remove unused phys_addr field from key Fiona Trahe
@ 2016-03-10 13:43 ` Fiona Trahe
2016-03-10 14:03 ` Thomas Monjalon
2016-03-10 13:43 ` [dpdk-dev] [PATCH v8 5/5] mbuf_offload: remove library Fiona Trahe
2016-03-15 5:21 ` [dpdk-dev] [PATCH v7 0/2] cryptodev API changes Cao, Min
8 siblings, 1 reply; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 13:43 UTC (permalink / raw)
To: dev
From: Declan Doherty <declan.doherty@intel.com>
This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
of rte_crypto_op structures rather than on rte_mbuf bursts, as in the current
implementation. This simplifies burst processing in the crypto PMDs and the use
of crypto operations in general, and adds new functions for managing
rte_crypto_op pools.
These changes continue the separation of the symmetric operation parameters
from the more general operation parameters, which will simplify the integration
of asymmetric crypto operations in the future.
PMDs, unit tests and sample applications are also modified to work with the
modified and new API.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
app/test/test_cryptodev.c | 640 +++++++++++----------
app/test/test_cryptodev_perf.c | 221 ++++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 133 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 132 +++--
drivers/crypto/qat/qat_crypto.h | 12 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 4 +-
examples/l2fwd-crypto/main.c | 164 +++---
lib/librte_cryptodev/rte_crypto.h | 352 +++++++++++-
lib/librte_cryptodev/rte_crypto_sym.h | 374 ++++++------
lib/librte_cryptodev/rte_cryptodev.c | 76 +++
lib/librte_cryptodev/rte_cryptodev.h | 108 ++--
lib/librte_cryptodev/rte_cryptodev_version.map | 5 +-
14 files changed, 1398 insertions(+), 839 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 5ced183..55367df 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -48,7 +48,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,8 +62,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_mbuf_offload *ol;
- struct rte_crypto_sym_op *op;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -112,19 +111,21 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
#if HEX_DUMP
hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
#if HEX_DUMP
@@ -132,7 +133,7 @@ process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -162,13 +163,14 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -253,10 +255,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -326,8 +327,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -793,53 +794,59 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ /* Set crypto operation authentication parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ /* Set crypto operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -864,60 +871,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
-
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -986,42 +999,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* attach symmetric crypto session to crypto operations */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1089,47 +1108,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1198,42 +1221,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1312,43 +1340,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1448,43 +1479,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1522,10 +1556,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
catch_22_quote, QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
@@ -1546,54 +1576,71 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Set operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ /* Set operation authentication parameters */
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->m_src,
+ CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ rte_pktmbuf_mtod_offset(
+ ut_params->op->sym->m_src, uint8_t *,
+ CIPHER_IV_LENGTH_AES_CBC +
+ QUOTE_512_BYTES),
catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
DIGEST_BYTE_LENGTH_AES_XCBC,
"Generated digest data not as expected");
@@ -1612,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC);
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
@@ -1646,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf, QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ rte_memcpy(sym_op->auth.digest.data,
+ catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
+ DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1835,50 +1894,53 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+ sym_op->m_dst = dst_m;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
-
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index b0c8abf..b43f9aa 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -50,7 +50,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -68,8 +68,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_op *op;
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +78,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +91,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
@@ -113,23 +112,24 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -256,8 +256,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1698,11 +1698,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1739,46 +1740,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[0].length;
+
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +1794,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1814,9 +1819,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1831,9 +1835,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_cryptodev_enqueue_burst(dev_num, 0,
NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1847,16 +1850,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1870,7 +1866,10 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
double throughput, mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1919,63 +1918,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+ TEST_ASSERT_NOT_NULL(ut_params->digest
+ , "no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
+
+ struct rte_crypto_op *op = rte_crypto_op_alloc(
+ ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[index].length;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->cipher.iv.data = (uint8_t *)
+ rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num,
- 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
- < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
- DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -1987,9 +1993,9 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
rte_cryptodev_enqueue_burst(dev_num, 0,
NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -2006,15 +2012,8 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym->m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f39ebd5..f28b29f 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -296,16 +296,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
void *_sess = NULL;
@@ -316,7 +316,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0)) {
+ sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -338,12 +338,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_sym_op *c_op,
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -372,10 +374,26 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL)
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
if (job->auth_tag_output == NULL) {
@@ -388,7 +406,7 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
sizeof(get_digest_byte_length(job->hash_alg)));
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
@@ -399,26 +417,22 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
@@ -433,43 +447,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_sym_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
}
- return m;
+ return op;
}
/**
@@ -485,17 +497,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_ops, (void *)op);
else
qp->stats.dequeue_err_count++;
-
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -503,11 +514,9 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
struct aesni_mb_qp *qp = queue_pair;
@@ -515,21 +524,23 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i],
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
+ for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+		if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+			MB_LOG_ERR("PMD only supports symmetric crypto "
+				"operation requests, op (%p) is not a "
+				"symmetric operation.", ops[i]);
qp->stats.enqueue_err_count++;
goto flush_jobs;
}
-
- sess = get_session(qp, &ol->op.crypto);
+#endif
+ sess = get_session(qp, ops[i]);
if (unlikely(sess == NULL)) {
qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(qp, ops[i], sess);
if (unlikely(job == NULL)) {
qp->stats.enqueue_err_count++;
goto flush_jobs;
@@ -565,15 +576,15 @@ flush_jobs:
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
struct aesni_mb_qp *qp = queue_pair;
unsigned nb_dequeued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops);
qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index d56de12..b1dd103 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -138,9 +138,9 @@ aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
return 0;
}
-/** Create a ring to place process packets on */
+/** Create a ring to place processed operations on */
static struct rte_ring *
-aesni_mb_pmd_qp_create_processed_pkts_ring(struct aesni_mb_qp *qp,
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
unsigned ring_size, int socket_id)
{
struct rte_ring *r;
@@ -148,12 +148,12 @@ aesni_mb_pmd_qp_create_processed_pkts_ring(struct aesni_mb_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (r->prod.size >= ring_size) {
- MB_LOG_INFO("Reusing existing ring %s for processed packets",
+ MB_LOG_INFO("Reusing existing ring %s for processed ops",
qp->name);
return r;
}
- MB_LOG_ERR("Unable to reuse existing ring %s for processed packets",
+ MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
qp->name);
return NULL;
}
@@ -189,9 +189,9 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->ops = &job_ops[internals->vector_mode];
- qp->processed_pkts = aesni_mb_pmd_qp_create_processed_pkts_ring(qp,
+ qp->processed_ops = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
qp_conf->nb_descriptors, socket_id);
- if (qp->processed_pkts == NULL)
+ if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
qp->sess_mp = dev->data->session_pool;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 0aed177..949d9a6 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -146,8 +146,8 @@ struct aesni_mb_qp {
/**< Vector mode dependent pointer table of the multi-buffer APIs */
MB_MGR mb_mgr;
/**< Multi-buffer instance */
- struct rte_ring *processed_pkts;
- /**< Ring for placing process packets */
+ struct rte_ring *processed_ops;
+	/**< Ring for placing processed operations */
struct rte_mempool *sess_mp;
/**< Session Mempool */
struct rte_cryptodev_stats stats;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e7b9027..11f7fb2 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -72,7 +72,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +275,16 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register uint32_t nb_ops_sent = 0;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,47 +295,44 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights;
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
-
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
- if (nb_pkts_sent == 0)
+ if (nb_ops_sent == 0)
return 0;
goto kick_tail;
}
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
- nb_pkts_sent++;
- cur_tx_pkt++;
+ nb_ops_sent++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
queue->hw_queue_number, tail);
queue->tail = tail;
- tmp_qp->stats.enqueued_count += nb_pkts_sent;
- return nb_pkts_sent;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ return nb_ops_sent;
}
uint16_t
-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -342,17 +340,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -361,9 +362,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -377,38 +377,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
- struct rte_mbuf_offload *ol;
-
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
return -EINVAL;
}
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+#endif
+ if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type
- != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
@@ -416,37 +414,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
* - always in place.
*/
qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
-
+ rte_pktmbuf_mtophys(op->sym->m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym->cipher.data.length;
+ cipher_param->cipher_offset = op->sym->cipher.data.offset;
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym->auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->auth_len = op->sym->auth.data.length;
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -457,9 +455,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
#endif
return 0;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index e9f71fe..9323383 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,12 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 85700fc..5e51aca 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 9b6b7ef..6aaa7c0 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -159,8 +164,8 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
@@ -180,7 +185,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -294,20 +299,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -315,7 +321,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -323,23 +330,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -377,43 +384,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_op_attach_sym_session(op, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym->auth.digest.length = cparams->digest_length;
+
+ op->sym->auth.data.offset = ipdata_offset;
+ op->sym->auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym->cipher.iv.data = cparams->iv_key.data;
+ op->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym->cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym->cipher.data.offset = ipdata_offset;
+ op->sym->cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym->m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -435,8 +442,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -445,7 +452,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -505,6 +512,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -565,12 +574,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -599,8 +608,6 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
-
portid = qconf->rx_port_list[i];
cparams = &port_cparams[i];
@@ -610,44 +617,49 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
port_statistics[portid].rx += nb_rx;
- /* Enqueue packets from Crypto device*/
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ if (nb_rx) {
/*
- * If we can't allocate a offload, then drop
+			 * If we can't allocate crypto ops, then drop
* the rest of the burst and dequeue and
* process the packets to free offload structs
*/
- if (unlikely(ol == NULL)) {
- for (; j < nb_rx; j++) {
- rte_pktmbuf_free(pkts_burst[j]);
- port_statistics[portid].dropped++;
- }
- break;
+ if (rte_crypto_op_bulk_alloc(
+ l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, nb_rx) !=
+ nb_rx) {
+ for (j = 0; j < nb_rx; j++)
+					rte_pktmbuf_free(pkts_burst[j]);
+
+ nb_rx = 0;
}
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ /* Enqueue packets from Crypto device*/
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m,
+ ops_burst[j], cparams);
+ }
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
-
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
+
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym->m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
@@ -1384,15 +1396,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 620c00b..e0c4fb9 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -44,23 +44,369 @@
extern "C" {
#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_crypto_sym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ /**< Undefined operation type */
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ /**< Symmetric operation */
+};
+
/** Status of crypto operation */
enum rte_crypto_op_status {
RTE_CRYPTO_OP_STATUS_SUCCESS,
/**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by a crypto device */
RTE_CRYPTO_OP_STATUS_ENQUEUED,
/**< Operation is enqueued on device */
RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
/**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**<
+ * Symmetric operation failed due to invalid session arguments, or if
+ * in session-less mode, failed to allocate private operation material.
+ */
RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
/**< Operation failed due to invalid arguments in request */
RTE_CRYPTO_OP_STATUS_ERROR,
/**< Error handling operation */
};
-#include <rte_crypto_sym.h>
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API, PMDs should check the type parameter to
+ * verify that the operation is a supported function of the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() functions.
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+ * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+
+ struct rte_mempool *mempool;
+ /**< crypto operation mempool which operation is allocated from */
+
+ phys_addr_t phys_addr;
+ /**< physical address of crypto operation */
+
+ void *opaque_data;
+ /**< Opaque pointer for user data */
+
+ union {
+ struct rte_crypto_sym_op *sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+} __rte_cache_aligned;
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->type = type;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ /** Symmetric operation structure starts after the end of the
+ * rte_crypto_op structure.
+ */
+ op->sym = (struct rte_crypto_sym_op *)(op + 1);
+ op->type = type;
+
+ __rte_crypto_sym_op_reset(op->sym);
+ break;
+ default:
+ break;
+ }
+
+ op->opaque_data = NULL;
+}
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ /**< Type of crypto operation which the pool supports. */
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Bulk allocate raw element from mempool and return as crypto operations
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns number of ops allocated; 0 or -EINVAL on failure
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ priv = rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != type &&
+ priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+ return -EINVAL;
+
+ if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+ return nb_ops;
+
+ return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = NULL;
+ int retval;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+ if (unlikely(retval != 1))
+ return NULL;
+
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns the number (nb_ops) of allocated crypto operations
+ * - On failure returns 0
+ */
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+ != nb_ops))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_crypto_op_reset(ops[i], type);
+
+ return nb_ops;
+}
+
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+
+ return NULL;
+}
+
+/**
+ * free crypto operation structure
+ * If operation has been allocated from a rte_mempool, then the operation will
+ * be returned to the mempool.
+ *
+ * @param op crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf which is associated with the crypto operation, the
+ * operation will be allocated in the private data of that
+ * mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /*
+ * check that the mbuf's private data size is sufficient to contain a
+ * crypto operation
+ */
+ if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op))))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym->m_src = m;
+
+ return op;
+}
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ void *priv_data;
+ uint32_t size;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return NULL;
+
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+ nb_xforms);
+}
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param sess cryptodev session
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -1;
+
+ return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 270510e..831bbf1 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -46,6 +46,8 @@
extern "C" {
#endif
+#include <string.h>
+
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -111,7 +113,6 @@ enum rte_crypto_cipher_operation {
/**< Decrypt cipher operation */
};
-
/**
* Symmetric Cipher Setup Data.
*
@@ -128,8 +129,8 @@ struct rte_crypto_cipher_xform {
/**< Cipher algorithm */
struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
} key;
/**< Cipher key
*
@@ -255,8 +256,8 @@ struct rte_crypto_auth_xform {
/**< Authentication algorithm selection */
struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
} key;
/**< Authentication key data.
* The authentication key length MUST be less than or equal to the
@@ -347,21 +348,24 @@ enum rte_crypto_sym_op_sess_type {
};
+struct rte_cryptodev_sym_session;
+
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
- * call for performing cipher, hash, or a combined hash and cipher operations.
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
+ *
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
+ * which contains the source data which the crypto operation is to be performed
+ * on.
*/
struct rte_crypto_sym_op {
- enum rte_crypto_sym_op_sess_type type;
- enum rte_crypto_op_status status;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ enum rte_crypto_sym_op_sess_type type;
union {
struct rte_cryptodev_sym_session *session;
@@ -372,7 +376,7 @@ struct rte_crypto_sym_op {
struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -380,7 +384,7 @@ struct rte_crypto_sym_op {
* this location.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -399,17 +403,68 @@ struct rte_crypto_sym_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The @ref aad field
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -417,180 +472,169 @@ struct rte_crypto_sym_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+ * ignored. The @ref aad field should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_sym_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+ * rte_cryptodev_sym_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint16_t length; /**< Length of digest */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
+} __rte_cache_aligned;
/**
- * Reset the fields of a crypto operation to their default values.
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
+ memset(op, 0, sizeof(*op));
+
op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
}
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+ void *priv_data, uint8_t nb_xforms)
+{
+ struct rte_crypto_sym_xform *xform;
+
+ sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param op crypto operation
+ * @param sess cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
struct rte_cryptodev_sym_session *sess)
{
- op->session = sess;
- op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+ sym_op->session = sess;
+ sym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+ return 0;
}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index c7fef6a..4632ca3 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1056,3 +1056,79 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
return NULL;
}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ unsigned elt_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ priv_size;
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->priv_size < priv_size) {
+ mp = NULL;
+ CDEV_LOG_ERR("Mempool %s already exists but with "
+ "incompatible parameters", name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ CDEV_LOG_ERR("Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index f4b38c1..aab8cff 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -40,16 +40,14 @@
* Defines RTE Crypto Device APIs for the provisioning of cipher and
* authentication operations.
*
- * @warning
* @b EXPERIMENTAL: this API may change without prior notice
+ *
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include "stddef.h"
-
#include "rte_crypto.h"
#include "rte_dev.h"
@@ -67,6 +65,9 @@ enum rte_cryptodev_type {
RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
+
+extern const char **rte_cyptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -214,8 +215,6 @@ struct rte_cryptodev_config {
/**
* Configure a device.
*
- * EXPERIMENTAL: this API file may change without prior notice
- *
* This function must be invoked first before any other function in the
* API. This function can also be re-invoked when a device is in the
* stopped state.
@@ -411,12 +410,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -489,66 +488,65 @@ struct rte_cryptodev_data {
extern struct rte_cryptodev *rte_cryptodevs;
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operation are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
/**
- * Enqueue a burst of packets for processing on a crypto device.
- *
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * Enqueue a burst of operations for processing on a crypto device.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all packets have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -556,25 +554,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto devices queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index a46af6f..b682184 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -1,4 +1,4 @@
-DPDK_2.2 {
+DPDK_16.04 {
global:
rte_cryptodevs;
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
-};
+};
\ No newline at end of file
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v8 5/5] mbuf_offload: remove library
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
` (6 preceding siblings ...)
2016-03-10 13:43 ` [dpdk-dev] [PATCH v8 4/5] cryptodev: change burst API to be crypto op oriented Fiona Trahe
@ 2016-03-10 13:43 ` Fiona Trahe
2016-03-15 5:21 ` [dpdk-dev] [PATCH v7 0/2] cryptodev API changes Cao, Min
8 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 13:43 UTC (permalink / raw)
To: dev
From: Declan Doherty <declan.doherty@intel.com>
As cryptodev library does not depend on mbuf_offload library
any longer, this patch removes it.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 1 -
app/test/test_cryptodev_perf.c | 1 -
config/common_base | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 1 -
drivers/crypto/qat/qat_crypto.c | 1 -
examples/l2fwd-crypto/main.c | 1 -
lib/Makefile | 1 -
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 ----
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 -------
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 ---------------------
.../rte_mbuf_offload_version.map | 7 -
14 files changed, 493 deletions(-)
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index e253bf7..421c317 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -223,10 +223,6 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 55367df..3240ecd 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index b43f9aa..b3f4fd9 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
diff --git a/config/common_base b/config/common_base
index c73f71a..0a02924 100644
--- a/config/common_base
+++ b/config/common_base
@@ -361,13 +361,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 7a91001..f626386 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f28b29f..9599cc4 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -37,7 +37,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 11f7fb2..3533f37 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 6aaa7c0..65e90b5 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
diff --git a/lib/Makefile b/lib/Makefile
index 6840f87..f254dba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index e39ad28..52f96c3 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
diff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile
deleted file mode 100644
index acdb449..0000000
--- a/lib/librte_mbuf_offload/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c
deleted file mode 100644
index 5c0c9dd..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
deleted file mode 100644
index 5ce6058..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operations
- * parameter's inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO_SYM
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_sym_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbug offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- (struct rte_pktmbuf_offload_pool_private *)rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success retruns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload strucutre to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO_SYM:
- __rte_crypto_sym_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_sym_xform *
-rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_sym_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
deleted file mode 100644
index 3d3b06a..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v8 4/5] cryptodev: change burst API to be crypto op oriented
2016-03-10 13:43 ` [dpdk-dev] [PATCH v8 4/5] cryptodev: change burst API to be crypto op oriented Fiona Trahe
@ 2016-03-10 14:03 ` Thomas Monjalon
0 siblings, 0 replies; 62+ messages in thread
From: Thomas Monjalon @ 2016-03-10 14:03 UTC (permalink / raw)
To: Fiona Trahe; +Cc: dev
2016-03-10 13:43, Fiona Trahe:
> lib/librte_cryptodev/rte_crypto.h | 352 +++++++++++-
One of the problems with inline functions in a header, is that it must
compile with C and C++. Unfortunately there is a small error here when
included in a C++ app:
rte_crypto.h:172:24: error:
invalid conversion from ‘void*’ to ‘rte_crypto_op_pool_private*’
rte_crypto.h:220:29: error:
invalid conversion from ‘void*’ to ‘rte_crypto_op_pool_private*’
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v8 0/5] cryptodev API changes
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
@ 2016-03-10 14:05 ` De Lara Guarch, Pablo
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 " Fiona Trahe
` (6 subsequent siblings)
7 siblings, 0 replies; 62+ messages in thread
From: De Lara Guarch, Pablo @ 2016-03-10 14:05 UTC (permalink / raw)
To: Trahe, Fiona, dev
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Fiona Trahe
> Sent: Thursday, March 10, 2016 1:43 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v8 0/5] cryptodev API changes
>
>
> This patch set separates the symmetric crypto operations from generic
> operations and then modifies the cryptodev burst API to accept bursts of
> rte_crypto_op rather than rte_mbufs.
>
> v8:
> - split patchset for easier review
> - fix broken /examples/l2fwd-crypto build in intermediate patch
> - split removal of rte_mbuf_offload into separate commit
>
> v7:
> - remove trailing spaces introduced in v6
> - rebase against recent config file changes
>
> v6:
> - restore EXPERIMENTAL label to cryptodev. Will handle removal in separate
> thread.
> (email subject was incorrect in v5, so v5 hasn't arrived in patchwork,
> therefore v6 is in-reply-to v4 message id)
>
> V5:
> - updates .map file
> - removes EXPERIMENTAL label from rte_cryptodev.h
>
> V4:
> - Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3
> patchset.
> - Typo fix in cached attribute on rte_crypto_op structure.
>
> V3:
> - Addresses V2 comments
> - Rebased for head
>
> Declan Doherty (2):
> cryptodev: change burst API to be crypto op oriented
> mbuf_offload: remove library
>
> Fiona Trahe (3):
> cryptodev: code cleanup
> cryptodev: refactor to partition common from symmetric-specific code
> cryptodev: remove unused phys_addr field from key
>
> MAINTAINERS | 4 -
> app/test/test_cryptodev.c | 883 +++++++++++----------
> app/test/test_cryptodev.h | 5 +-
> app/test/test_cryptodev_perf.c | 270 ++++---
> config/common_base | 7 -
> doc/api/doxy-api-index.md | 1 -
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 190 +++--
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 30 +-
> drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 10 +-
> drivers/crypto/qat/qat_crypto.c | 160 ++--
> drivers/crypto/qat/qat_crypto.h | 14 +-
> drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
> examples/l2fwd-crypto/main.c | 234 +++---
> lib/Makefile | 1 -
> lib/librte_cryptodev/Makefile | 1 +
> lib/librte_cryptodev/rte_crypto.h | 819 ++++++++-----------
> lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
> lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
> lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
> lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
> lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
> lib/librte_mbuf/rte_mbuf.h | 6 -
> lib/librte_mbuf_offload/Makefile | 52 --
> lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
> lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 --------
> .../rte_mbuf_offload_version.map | 7 -
> 26 files changed, 2130 insertions(+), 1967 deletions(-)
> create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
> delete mode 100644 lib/librte_mbuf_offload/Makefile
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
> delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
> delete mode 100644
> lib/librte_mbuf_offload/rte_mbuf_offload_version.map
>
> --
> 2.1.0
Series-acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v9 0/5] cryptodev API changes
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
2016-03-10 14:05 ` De Lara Guarch, Pablo
@ 2016-03-10 15:41 ` Fiona Trahe
2016-03-10 16:14 ` Thomas Monjalon
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 1/5] cryptodev: code cleanup Fiona Trahe
` (5 subsequent siblings)
7 siblings, 1 reply; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 15:41 UTC (permalink / raw)
To: dev
From: Pablo de Lara <pablo.de.lara.guarch@intel.com>
This patch set separates the symmetric crypto operations from generic operations
and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather
than rte_mbufs.
v9:
- include missing casting that was making C++ complain
v8:
- split patchset for easier review
- fix broken /examples/l2fwd-crypto build in intermediate patch
- split removal of rte_mbuf_offload into separate commit
v7:
- remove trailing spaces introduced in v6
- rebase against recent config file changes
v6:
- restore EXPERIMENTAL label to cryptodev. Will handle removal in separate thread.
(email subject was incorrect in v5, so v5 hasn't arrived in patchwork, therefore v6 is in-reply-to v4 message id)
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (2):
cryptodev: change burst API to be crypto op oriented
mbuf_offload: remove library
Fiona Trahe (3):
cryptodev: code cleanup
cryptodev: refactor to partition common from symmetric-specific code
cryptodev: remove unused phys_addr field from key
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 883 +++++++++++----------
app/test/test_cryptodev.h | 5 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_base | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 190 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 30 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 10 +-
drivers/crypto/qat/qat_crypto.c | 160 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 234 +++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 ++++++++-----------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 9 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 --------
.../rte_mbuf_offload_version.map | 7 -
26 files changed, 2131 insertions(+), 1968 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v9 1/5] cryptodev: code cleanup
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
2016-03-10 14:05 ` De Lara Guarch, Pablo
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 " Fiona Trahe
@ 2016-03-10 15:41 ` Fiona Trahe
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 2/5] cryptodev: refactor to partition common from symmetric-specific code Fiona Trahe
` (4 subsequent siblings)
7 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 15:41 UTC (permalink / raw)
To: dev
- Fixed >80char lines in test file
- Removed unused elements from stats struct
- Removed unused objects in rte_cryptodev_pmd.h
- Renamed variables
- Replaced leading spaces with tabs
- Improved performance results display in test
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
app/test/test_cryptodev.c | 151 ++++++++++-----------
app/test/test_cryptodev.h | 5 +-
app/test/test_cryptodev_perf.c | 54 +++++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 28 ++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 2 +-
lib/librte_cryptodev/rte_cryptodev.h | 35 +----
lib/librte_cryptodev/rte_cryptodev_pmd.h | 10 +-
8 files changed, 139 insertions(+), 158 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 62f8fb0..1a0f204 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -165,7 +165,8 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -679,76 +680,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -775,7 +777,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -785,8 +786,9 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ /* Create crypto session*/
+ ut_params->sess = rte_cryptodev_session_create(
+ ts_params->valid_devs[0],
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -832,7 +834,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
CIPHER_IV_LENGTH_AES_CBC,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
TEST_ASSERT_BUFFERS_ARE_EQUAL(
rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
@@ -1239,7 +1241,6 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
return TEST_SUCCESS;
}
@@ -1361,7 +1362,6 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
DIGEST_BYTE_LENGTH_SHA512,
"Generated digest data not as expected");
-
return TEST_SUCCESS;
}
@@ -1417,6 +1417,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->auth_xform.auth.key.data = hmac_sha512_key;
ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
+
return TEST_SUCCESS;
}
@@ -1490,8 +1491,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"Digest verification failed");
-
-
return TEST_SUCCESS;
}
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 034393e..083266a 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -47,8 +47,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 728bcf0..87f0670 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1741,7 +1741,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < num_to_submit ; b++) {
tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ (const char *)data_params[0].expected.ciphertext,
data_params[0].length, 0);
TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
@@ -1814,7 +1814,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1823,11 +1824,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, burst_size);
if (burst_received == 0)
failed_polls++;
@@ -1861,10 +1866,10 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
+ double throughput, mmps;
struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -1904,7 +1909,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+ "Mrps\tThoughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1957,15 +1962,19 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, tx_mbufs,
+ ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
+ < DEFAULT_BURST_SIZE) ?
+ DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
+ DEFAULT_BURST_SIZE);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num,
0, rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1973,10 +1982,13 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
+ burst_received =
+ rte_cryptodev_dequeue_burst(dev_num, 0,
rx_mbufs, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
@@ -1984,11 +1996,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
@@ -2002,6 +2017,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
rte_pktmbuf_free(tx_mbufs[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index c7d884c..f2afdb6 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -377,17 +377,21 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ sizeof(get_digest_byte_length(job->hash_alg)));
+
} else {
job->auth_tag_output = c_op->digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library current only support returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
@@ -489,7 +493,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (m)
rte_ring_enqueue(qp->processed_pkts, (void *)m);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -513,19 +517,19 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
for (i = 0; i < nb_bufs; i++) {
ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ qp->stats.enqueue_err_count++;
goto flush_jobs;
}
sess = get_session(qp, &ol->op.crypto);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ qp->stats.enqueue_err_count++;
goto flush_jobs;
}
job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ qp->stats.enqueue_err_count++;
goto flush_jobs;
}
@@ -543,7 +547,7 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -553,7 +557,7 @@ flush_jobs:
*/
job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ qp->stats.enqueued_count += handle_completed_jobs(qp, job);
return i;
}
@@ -568,7 +572,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair,
nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
(void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index e43b898..76a85ff 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..304c85c 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..0ccd1b8 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -130,17 +130,6 @@ struct rte_cryptodev_qp_conf {
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
enum rte_cryptodev_event_type event, void *cb_arg);
-#ifdef RTE_CRYPTODEV_PERF
-/**
- * Crypto Device performance counter statistics structure. This structure is
- * used for RDTSC counters for measuring crypto operations.
- */
-struct rte_cryptodev_perf_stats {
- uint64_t t_accumlated; /**< Accumulated time processing operation */
- uint64_t t_min; /**< Max time */
- uint64_t t_max; /**< Min time */
-};
-#endif
/** Crypto Device statistics */
struct rte_cryptodev_stats {
@@ -153,29 +142,8 @@ struct rte_cryptodev_stats {
/**< Total error count on operations enqueued */
uint64_t dequeue_err_count;
/**< Total error count on operations dequeued */
+};
-#ifdef RTE_CRYPTODEV_DETAILED_STATS
- struct {
- uint64_t encrypt_ops; /**< Count of encrypt operations */
- uint64_t encrypt_bytes; /**< Number of bytes encrypted */
-
- uint64_t decrypt_ops; /**< Count of decrypt operations */
- uint64_t decrypt_bytes; /**< Number of bytes decrypted */
- } cipher; /**< Cipher operations stats */
-
- struct {
- uint64_t generate_ops; /**< Count of generate operations */
- uint64_t bytes_hashed; /**< Number of bytes hashed */
-
- uint64_t verify_ops; /**< Count of verify operations */
- uint64_t bytes_verified;/**< Number of bytes verified */
- } hash; /**< Hash operations stats */
-#endif
-
-#ifdef RTE_CRYPTODEV_PERF
- struct rte_cryptodev_perf_stats op_perf; /**< Operations stats */
-#endif
-} __rte_cache_aligned;
/**
* Create a virtual crypto device
@@ -632,7 +600,6 @@ extern struct rte_cryptodev_session *
rte_cryptodev_session_create(uint8_t dev_id,
struct rte_crypto_xform *xform);
-
/**
* Free the memory associated with a previously allocated session.
*
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 8270afa..a16d109 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -56,11 +56,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-struct rte_cryptodev_stats;
-struct rte_cryptodev_info;
-struct rte_cryptodev_qp_conf;
-
-enum rte_cryptodev_event_type;
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define RTE_PMD_DEBUG_TRACE(...) \
@@ -80,7 +75,6 @@ struct rte_cryptodev_session {
};
struct rte_cryptodev_driver;
-struct rte_cryptodev;
/**
* Initialisation function of a crypto driver invoked for each matching
@@ -427,9 +421,9 @@ struct rte_cryptodev_ops {
cryptodev_info_get_t dev_infos_get; /**< Get device info. */
cryptodev_stats_get_t stats_get;
- /**< Get generic device statistics. */
+ /**< Get device statistics. */
cryptodev_stats_reset_t stats_reset;
- /**< Reset generic device statistics. */
+ /**< Reset device statistics. */
cryptodev_queue_pair_setup_t queue_pair_setup;
/**< Set up a device queue pair. */
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v9 2/5] cryptodev: refactor to partition common from symmetric-specific code
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
` (2 preceding siblings ...)
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 1/5] cryptodev: code cleanup Fiona Trahe
@ 2016-03-10 15:41 ` Fiona Trahe
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 3/5] cryptodev: remove unused phys_addr field from key Fiona Trahe
` (3 subsequent siblings)
7 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 15:41 UTC (permalink / raw)
To: dev
This patch splits symmetric-specific definitions and
functions away from the common crypto APIs to facilitate the future extension
and expansion of the cryptodev framework, in order to allow asymmetric
crypto operations to be introduced at a later date, as well as to clean the
logical structure of the public includes. The patch also introduces the _sym
prefix to symmetric-specific structures and functions to improve clarity in
the API.
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
app/test/test_cryptodev.c | 161 +++---
app/test/test_cryptodev_perf.c | 42 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 44 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 6 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 51 +-
drivers/crypto/qat/qat_crypto.h | 10 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 31 +-
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 547 +------------------
lib/librte_cryptodev/rte_crypto_sym.h | 598 +++++++++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 39 +-
lib/librte_cryptodev/rte_cryptodev.h | 46 +-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 22 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 6 +-
lib/librte_mbuf_offload/rte_mbuf_offload.h | 22 +-
17 files changed, 867 insertions(+), 771 deletions(-)
create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 1a0f204..5ced183 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,13 +57,13 @@ struct crypto_testsuite_params {
};
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
struct rte_mbuf_offload *ol;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -78,7 +78,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_param);
@@ -166,7 +166,7 @@ testsuite_setup(void)
"MBUF_OFFLOAD_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -221,7 +221,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
@@ -276,7 +276,7 @@ ut_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
ts_params->conf.session_mp.nb_objs =
- (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+ (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
DEFAULT_NUM_OPS_INFLIGHT :
DEFAULT_NUM_OPS_INFLIGHT;
@@ -320,7 +320,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
ut_params->sess);
ut_params->sess = NULL;
}
@@ -465,7 +465,7 @@ test_queue_pair_descriptor_setup(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf), "Failed to configure cryptodev %u",
@@ -768,7 +768,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -777,7 +777,8 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -787,21 +788,21 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create crypto session*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -864,18 +865,18 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
ut_params->op = &ut_params->ol->op.crypto;
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+ TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
ut_params->ol, 2),
"failed to allocate space for crypto transforms");
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -884,7 +885,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -960,7 +961,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA1);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -969,7 +970,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -979,13 +980,14 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -993,7 +995,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1068,7 +1070,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1077,7 +1079,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1087,13 +1089,14 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1101,7 +1104,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1170,7 +1173,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_SHA256);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1179,7 +1182,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1189,13 +1192,14 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1203,7 +1207,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1283,7 +1287,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1292,7 +1296,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1302,15 +1306,16 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1318,7 +1323,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1371,7 +1376,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
struct crypto_unittest_params *ut_params);
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
@@ -1386,8 +1391,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
"Failed to create session params");
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
@@ -1400,7 +1406,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
{
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1409,7 +1415,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1423,7 +1429,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params)
{
@@ -1443,7 +1449,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1451,7 +1457,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1521,7 +1527,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1530,7 +1536,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1540,13 +1546,14 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1554,7 +1561,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)
rte_pktmbuf_prepend(ut_params->ibuf,
@@ -1614,7 +1621,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
DIGEST_BYTE_LENGTH_AES_XCBC);
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1623,7 +1630,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1633,13 +1640,14 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ut_params->sess =
+ rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+ &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1647,7 +1655,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
@@ -1751,7 +1759,7 @@ test_multi_session(void)
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_cryptodev_info dev_info;
- struct rte_cryptodev_session **sessions;
+ struct rte_cryptodev_sym_session **sessions;
uint16_t i;
@@ -1760,12 +1768,13 @@ test_multi_session(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
- dev_info.max_nb_sessions) + 1, 0);
+ sessions = rte_malloc(NULL,
+ (sizeof(struct rte_cryptodev_sym_session *) *
+ dev_info.sym.max_nb_sessions) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.max_nb_sessions; i++) {
- sessions[i] = rte_cryptodev_session_create(
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(sessions[i],
@@ -1780,13 +1789,13 @@ test_multi_session(void)
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+ sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
&ut_params->auth_xform);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.max_nb_sessions; i++)
- rte_cryptodev_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+ rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
sessions[i]);
rte_free(sessions);
@@ -1805,7 +1814,7 @@ test_not_in_place_crypto(void)
/* Create multiple crypto sessions*/
- ut_params->sess = rte_cryptodev_session_create(
+ ut_params->sess = rte_cryptodev_sym_session_create(
ts_params->valid_devs[0], &ut_params->auth_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1827,7 +1836,7 @@ test_not_in_place_crypto(void)
/* Generate Crypto op data structure */
ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ut_params->ol,
"Failed to allocate pktmbuf offload");
@@ -1835,7 +1844,7 @@ test_not_in_place_crypto(void)
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
ut_params->op->digest.data = ut_params->digest;
ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1961,7 +1970,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static struct test_command cryptodev_qat_cmd = {
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 87f0670..b0c8abf 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -63,12 +63,12 @@ struct crypto_testsuite_params {
#define MAX_NUM_OF_OPS_PER_UT (128)
struct crypto_unittest_params {
- struct rte_crypto_xform cipher_xform;
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *op;
+ struct rte_crypto_sym_op *op;
struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
@@ -127,7 +127,7 @@ testsuite_setup(void)
ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
+ sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
if (ts_params->mbuf_ol_pool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -179,7 +179,7 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+ ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
@@ -252,7 +252,7 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess)
- rte_cryptodev_session_free(ts_params->dev_id,
+ rte_cryptodev_sym_session_free(ts_params->dev_id,
ut_params->sess);
/* free crypto operation structure */
@@ -1713,7 +1713,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1723,7 +1723,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1733,7 +1733,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1753,12 +1753,12 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
DIGEST_BYTE_LENGTH_SHA256);
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+ ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
@@ -1881,7 +1881,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
}
/* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1890,7 +1890,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1900,7 +1900,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1933,12 +1933,12 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ struct rte_crypto_sym_op *cop = &ol->op.crypto;
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(cop, ut_params->sess);
cop->digest.data = ut_params->digest;
cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -2060,7 +2060,7 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+ gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f2afdb6..f39ebd5 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -109,7 +109,7 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
/** Get xform chain order */
static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
/*
* Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
@@ -119,12 +119,12 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
if (xform->next == NULL || xform->next->next != NULL)
return -1;
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return HASH_CIPHER;
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return CIPHER_HASH;
return -1;
@@ -134,11 +134,11 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
- if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
@@ -196,11 +196,11 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
- if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
}
@@ -259,10 +259,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform)
+ const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_xform *auth_xform = NULL;
- const struct rte_crypto_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,11 +296,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(crypto_op->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
@@ -313,7 +313,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
return NULL;
sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
sess, crypto_op->xform) != 0)) {
@@ -339,7 +339,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
*/
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+ struct rte_crypto_sym_op *c_op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
@@ -436,14 +437,14 @@ static struct rte_mbuf *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_sym_op *c_op;
if (job->user_data == NULL)
return NULL;
/* handled retrieved job */
m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
+ c_op = (struct rte_crypto_sym_op *)job->user_data2;
/* set status as successful by default */
c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -463,7 +464,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+ if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, c_op->session);
c_op->session = NULL;
}
@@ -515,7 +516,8 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(bufs[i],
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
qp->stats.enqueue_err_count++;
goto flush_jobs;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 76a85ff..d56de12 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -108,7 +108,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->dev_type = dev->dev_type;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->max_nb_sessions = internals->max_nb_sessions;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
}
}
@@ -243,7 +243,7 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
/** Configure a aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform, void *sess)
{
struct aesni_mb_private *internals = dev->data->dev_private;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 304c85c..0aed177 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -218,7 +218,7 @@ struct aesni_mb_session {
extern int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
struct aesni_mb_session *sess,
- const struct rte_crypto_xform *xform);
+ const struct rte_crypto_sym_xform *xform);
/** device specific operations function pointer structure */
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 828756b..e7b9027 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,37 +89,37 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
}
static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
if (xform->next == NULL)
return -1;
/* Cipher Only */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
/* Authentication Only */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
/* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
/* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
return -1;
}
static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
return &xform->auth;
xform = xform->next;
@@ -129,10 +129,10 @@ qat_get_auth_xform(struct rte_crypto_xform *xform)
}
static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
do {
- if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
return &xform->cipher;
xform = xform->next;
@@ -144,7 +144,7 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform)
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -261,7 +261,7 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
auth_xform->digest_length))
goto error_out;
- return (struct rte_cryptodev_session *)session;
+ return (struct rte_crypto_sym_session *)session;
error_out:
rte_mempool_put(internals->sess_mp, session);
@@ -275,7 +275,7 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
register struct qat_queue *queue;
@@ -327,7 +327,8 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct rte_mbuf_offload *ol;
struct qat_queue *queue;
@@ -343,12 +344,13 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_pkts) {
rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+ ol = rte_pktmbuf_offload_get(rx_mbuf,
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ ol->op.crypto.status =
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
@@ -384,20 +386,21 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
if (unlikely(ol == NULL)) {
PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
"to (%p) mbuf.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests mbuf (%p) is sessionless.", mbuf);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(ol->op.crypto.session->type
+ != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
@@ -520,8 +523,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
- info->max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..e9f71fe 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,16 +109,18 @@ qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
extern void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform, void *session_private);
extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..85700fc 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,11 +89,11 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->pci_dev->addr.devid,
cryptodev->pci_dev->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index a950b74..ee519e7 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,13 +124,13 @@ struct l2fwd_crypto_options {
enum l2fwd_crypto_xform_chain xform_chain;
- struct rte_crypto_xform cipher_xform;
+ struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
struct rte_crypto_key iv_key;
uint8_t ivkey_data[16];
- struct rte_crypto_xform auth_xform;
+ struct rte_crypto_sym_xform auth_xform;
uint8_t akey_data[128];
};
@@ -141,9 +141,8 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
-
struct rte_crypto_key iv_key;
- struct rte_cryptodev_session *session;
+ struct rte_cryptodev_sym_session *session;
};
/** lcore configuration */
@@ -372,7 +371,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
/* Append space for digest to end of packet */
ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -474,11 +473,11 @@ generate_random_key(uint8_t *key, unsigned length)
key[i] = rand() % 0xff;
}
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
uint8_t cdev_id)
{
- struct rte_crypto_xform *first_xform;
+ struct rte_crypto_sym_xform *first_xform;
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
@@ -489,7 +488,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options,
}
/* Setup Cipher Parameters */
- return rte_cryptodev_session_create(cdev_id, first_xform);
+ return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}
static void
@@ -610,7 +609,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
m = pkts_burst[j];
ol = rte_pktmbuf_offload_alloc(
l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ RTE_PKTMBUF_OL_CRYPTO_SYM);
/*
* If we can't allocate a offload, then drop
* the rest of the burst and dequeue and
@@ -689,7 +688,7 @@ parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg)
*type = RTE_CRYPTODEV_AESNI_MB_PMD;
return 0;
} else if (strcmp("QAT", optarg) == 0) {
- *type = RTE_CRYPTODEV_QAT_PMD;
+ *type = RTE_CRYPTODEV_QAT_SYM_PMD;
return 0;
}
@@ -937,7 +936,7 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
options->cipher_xform.next = NULL;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -946,12 +945,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->ckey_data, sizeof(options->ckey_data));
options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
options->cipher_xform.cipher.key.length = 16;
/* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
options->auth_xform.next = NULL;
options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -963,7 +961,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
generate_random_key(options->akey_data, sizeof(options->akey_data));
options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
options->auth_xform.auth.key.length = 20;
}
@@ -982,7 +979,7 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
switch (options->cdev_type) {
case RTE_CRYPTODEV_AESNI_MB_PMD:
printf("cryptodev type: AES-NI MB PMD\n"); break;
- case RTE_CRYPTODEV_QAT_PMD:
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
printf("cryptodev type: QAT PMD\n"); break;
default:
break;
@@ -1179,7 +1176,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports)
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
int retval;
- if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+ if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
if (rte_cryptodev_count() < nb_ports)
return -1;
} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 81fa3fc..0d59229 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -45,6 +45,7 @@ SRCS-y += rte_cryptodev.c
# export include files
SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..620c00b 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,313 +36,14 @@
/**
* @file rte_crypto.h
*
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
*
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
- RTE_CRYPTO_CIPHER_NULL = 1,
- /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
- RTE_CRYPTO_CIPHER_3DES_CBC,
- /**< Triple DES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_3DES_CTR,
- /**< Triple DES algorithm in CTR mode */
- RTE_CRYPTO_CIPHER_3DES_ECB,
- /**< Triple DES algorithm in ECB mode */
-
- RTE_CRYPTO_CIPHER_AES_CBC,
- /**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
- RTE_CRYPTO_CIPHER_AES_CTR,
- /**< AES algorithm in Counter mode */
- RTE_CRYPTO_CIPHER_AES_ECB,
- /**< AES algorithm in ECB mode */
- RTE_CRYPTO_CIPHER_AES_F8,
- /**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* element of the
- * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_setup_data* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation.
- */
- RTE_CRYPTO_CIPHER_AES_XTS,
- /**< AES algorithm in XTS mode */
-
- RTE_CRYPTO_CIPHER_ARC4,
- /**< (A)RC4 cipher algorithm */
-
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- /**< Kasumi algorithm in F8 mode */
-
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- /**< SNOW3G algorithm in UEA2 mode */
-
- RTE_CRYPTO_CIPHER_ZUC_EEA3
- /**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- /**< Encrypt cipher operation */
- RTE_CRYPTO_CIPHER_OP_DECRYPT
- /**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- * use to create a session.
- */
-struct rte_crypto_cipher_xform {
- enum rte_crypto_cipher_operation op;
- /**< This parameter determines if the cipher operation is an encrypt or
- * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
- * only encrypt operations are valid.
- */
- enum rte_crypto_cipher_algorithm algo;
- /**< Cipher algorithm */
-
- struct rte_crypto_key key;
- /**< Cipher key
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
- * point to a concatenation of the AES encryption key followed by a
- * keymask. As per RFC3711, the keymask should be padded with trailing
- * bytes to match the length of the encryption key used.
- *
- * For AES-XTS mode of operation, two keys must be provided and
- * key.data must point to the two keys concatenated together (Key1 ||
- * Key2). The cipher key length will contain the total size of both
- * keys.
- *
- * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
- * 192 bits (24 bytes) or 256 bits (32 bytes).
- *
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
- * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
- * should be set to the combined length of the encryption key and the
- * keymask. Since the keymask and the encryption key are the same size,
- * key.length should be set to 2 x the AES encryption key length.
- *
- * For the AES-XTS mode of operation:
- * - Two keys must be provided and key.length refers to total length of
- * the two keys.
- * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size.
- **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
- RTE_CRYPTO_AUTH_NULL = 1,
- /**< NULL hash algorithm. */
-
- RTE_CRYPTO_AUTH_AES_CBC_MAC,
- /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_CMAC,
- /**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- /**< AES XCBC algorithm. */
-
- RTE_CRYPTO_AUTH_KASUMI_F9,
- /**< Kasumi algorithm in F9 mode. */
-
- RTE_CRYPTO_AUTH_MD5,
- /**< MD5 algorithm */
- RTE_CRYPTO_AUTH_MD5_HMAC,
- /**< HMAC using MD5 algorithm */
-
- RTE_CRYPTO_AUTH_SHA1,
- /**< 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA1_HMAC,
- /**< HMAC using 128 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224,
- /**< 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA224_HMAC,
- /**< HMAC using 224 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256,
- /**< 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA256_HMAC,
- /**< HMAC using 256 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384,
- /**< 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA384_HMAC,
- /**< HMAC using 384 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512,
- /**< 512 bit SHA algorithm. */
- RTE_CRYPTO_AUTH_SHA512_HMAC,
- /**< HMAC using 512 bit SHA algorithm. */
-
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- /**< SNOW3G algorithm in UIA2 mode. */
-
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- /**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
- RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
- RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
- enum rte_crypto_auth_operation op;
- /**< Authentication operation type */
- enum rte_crypto_auth_algorithm algo;
- /**< Authentication algorithm selection */
-
- struct rte_crypto_key key; /**< Authentication key data.
- * The authentication key length MUST be less than or equal to the
- * block size of the algorithm. It is the callers responsibility to
- * ensure that the key length is compliant with the standard being used
- * (for example RFC 2104, FIPS 198a).
- */
-
- uint32_t digest_length;
- /**< Length of the digest to be returned. If the verify option is set,
- * this specifies the length of the digest to be compared for the
- * session.
- *
- * If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_session_create* or by the
- * *rte_cryptodev_enqueue_burst* if using session-less APIs.
- */
-
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in the message length to hash field of
- * the rte_crypto_op_data structure.
- */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
- RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
- RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
- struct rte_crypto_xform *next; /**< next xform in chain */
-
- enum rte_crypto_xform_type type; /**< xform type */
- union {
- struct rte_crypto_auth_xform auth;
- /**< Authentication / hash xform */
- struct rte_crypto_cipher_xform cipher;
- /**< Cipher xform */
- };
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
- RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
/** Status of crypto operation */
enum rte_crypto_op_status {
RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -359,249 +60,7 @@ enum rte_crypto_op_status {
/**< Error handling operation */
};
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
- enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
-
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
-
- union {
- struct rte_cryptodev_session *session;
- /**< Handle for the initialised session context */
- struct rte_crypto_xform *xform;
- /**< Session-less API crypto operation parameters */
- };
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- */
- } to_cipher; /**< Data offsets and length for ciphering */
-
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field specifies the start
- * of the AAD data in the source buffer.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field specifies the length of
- * the AAD data in the source buffer.
- */
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
-
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param op The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
- op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
- struct rte_cryptodev_session *sess)
-{
- op->session = sess;
- op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 0000000..de6c701
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,598 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CCM,
+ /**< AES algorithm in CCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation
+ */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_GCM,
+ /**< AES algorithm in GCM mode. When this cipher algorithm is used the
+ * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+ * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+ * *rte_crypto_auth_xform* structure in the session context or in
+ * the op_params of the crypto operation structure in the case of a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< Kasumi algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ /**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/** Crypto key structure */
+struct rte_crypto_key {
+ uint8_t *data; /**< pointer to key data */
+ phys_addr_t phys_addr;
+ size_t length; /**< key length in bytes */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * use to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct rte_crypto_key key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * For AES-XTS mode of operation, two keys must be provided and
+ * key.data must point to the two keys concatenated together (Key1 ||
+ * Key2). The cipher key length will contain the total size of both
+ * keys.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the CCM mode of operation, the only supported key length is 128
+ * bits (16 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ **/
+};
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CCM,
+ /**< AES algorithm in CCM mode. This is an authenticated cipher. When
+ * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+ * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+ * set up the related rte_crypto_cipher_xform structure in the
+ * session context or the corresponding parameter in the crypto
+ * operation data structures op_params parameter MUST be set for a
+ * session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GCM,
+ /**< AES algorithm in GCM mode. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. When this hash algorithm
+ * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+ * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+ * rte_crypto_cipher_xform structure in the session context, or
+ * the corresponding parameter in the crypto operation data structures
+ * op_params parameter MUST be set for a session-less crypto operation.
+ */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< Kasumi algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 128 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transforms. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct rte_crypto_key key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the callers responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ uint32_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated. If the value is greater than the
+ * maximum length allowed by the hash then an error will be generated
+ * by *rte_cryptodev_sym_session_create* or by the
+ * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ */
+
+ uint32_t add_auth_data_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * The maximum permitted value is 240 bytes, unless otherwise specified
+ * below.
+ *
+ * This field must be specified when the hash algorithm is one of the
+ * following:
+ *
+ * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+ * length of the IV (which should be 16).
+ *
+ * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+ * the length of the Additional Authenticated Data (called A, in NIST
+ * SP800-38D).
+ *
+ * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+ * the length of the associated data (called A, in NIST SP800-38C).
+ * Note that this does NOT include the length of any padding, or the
+ * 18 bytes reserved at the start of the above field to store the
+ * block B0 and the encoded length. The maximum permitted value in
+ * this case is 222 bytes.
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+ * this field is not used and should be set to 0. Instead the length
+ * of the AAD data is specified in the message length to hash field of
+ * the rte_crypto_sym_op structure.
+ */
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required, multiple transforms
+ * can be chained together to specify a chain transforms such as authentication
+ * then cipher, or cipher then authentication. Each transform structure can
+ * hold a single transform, the type field is used to specify which transform
+ * is contained within the union
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type;
+ /**< xform type */
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ };
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+ RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
+ * call for performing cipher, hash, or a combined hash and cipher operations.
+ */
+struct rte_crypto_sym_op {
+ enum rte_crypto_sym_op_sess_type type;
+ enum rte_crypto_op_status status;
+
+ struct {
+ struct rte_mbuf *m; /**< Destination mbuf */
+ uint8_t offset; /**< Data offset */
+ } dst;
+
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing, specified
+ * as number of bytes from start of data in the source
+ * buffer. The result of the cipher operation will be
+ * written back into the output buffer starting at
+ * this location.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ * if a block cipher is being used. This is also the
+ * same as the result length.
+ *
+ * @note
+ * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+ * this value should not include the length of the
+ * padding or the length of the MAC; the driver will
+ * compute the actual number of bytes over which the
+ * encryption will occur, which will include these
+ * values.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+ * field should be set to 0.
+ */
+ } to_cipher; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+ * mode of operation, this field specifies the start
+ * of the AAD data in the source buffer.
+ */
+
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For CCM and GCM modes of operation, this field is
+ * ignored. The @ref additional_auth field
+ * should be set instead.
+ *
+ * @note
+ * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+ * of operation, this field specifies the length of
+ * the AAD data in the source buffer.
+ */
+ } to_hash; /**< Data offsets and length for authentication */
+ } data; /**< Details of data to be operated on */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+ * Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length is 96
+ * bits) or J0 (for other sizes), where J0 is as defined by
+ * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+ * needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the nonce
+ * should be written starting at &iv[1] (to allow space for the
+ * implementation to write in the flags in the first byte).
+ * Note that a full 16 bytes should be allocated, even though
+ * the length field will have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+ * 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD be
+ * 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ size_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+ * mode, or for SNOW3G in UEA2 mode, this is the length of the
+ * IV (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length of the
+ * counter (which must be the same as the block length of the
+ * cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+ * which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce, which can
+ * be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result should be
+ * inserted (in the case of digest generation) or where the
+ * purported digest exists (in the case of digest
+ * verification).
+ *
+ * At session creation time, the client specified the digest
+ * result length with the digest_length member of the @ref
+ * rte_crypto_auth_xform structure. For physical crypto
+ * devices the caller must allocate at least digest_length of
+ * physically contiguous memory at this location.
+ *
+ * For digest generation, the digest result will overwrite
+ * any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is understood
+ * to be in the destination buffer for digest generation, and
+ * in the source buffer for digest verification. The location
+ * of the digest result in this case is immediately following
+ * the region over which the digest is computed.
+ */
+ phys_addr_t phys_addr; /**< Physical address of digest */
+ uint32_t length; /**< Length of digest */
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD) needed for
+ * authenticated cipher mechanisms (CCM and GCM), and to the IV
+ * for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is set up
+ * for the session in the @ref rte_crypto_auth_xform structure
+ * as part of the @ref rte_cryptodev_sym_session_create function
+ * call. This length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+ * caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset of one
+ * byte into the array, leaving room for the implementation
+ * to write in the flags to the first byte.
+ *
+ * - the additional authentication data itself should be
+ * written starting at an offset of 18 bytes into the array,
+ * leaving room for the length encoding in the first two
+ * bytes of the second block.
+ *
+ * - the array should be big enough to hold the above fields,
+ * plus any padding to round this up to the nearest multiple
+ * of the block size (16 bytes). Padding will be added by
+ * the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set to 0.
+ * Instead the AAD data should be placed in the source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint32_t length; /**< Length of additional authenticated data (AAD) in bytes */
+ } additional_auth;
+ /**< Additional authentication parameters */
+
+ struct rte_mempool *pool;
+ /**< mempool used to allocate crypto op */
+
+ void *user_data;
+ /**< opaque pointer for user data */
+};
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+ op->dst.m = NULL;
+ op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ op->session = sess;
+ op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 2838852..c7fef6a 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -572,8 +572,8 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
@@ -604,8 +604,10 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
- config->session_mp.cache_size, config->socket_id);
+ return rte_cryptodev_sym_session_pool_create(dev,
+ config->session_mp.nb_objs,
+ config->session_mp.cache_size,
+ config->socket_id);
}
@@ -911,12 +913,12 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
void *opaque_arg,
void *_sess,
__rte_unused unsigned i)
{
- struct rte_cryptodev_session *sess = _sess;
+ struct rte_cryptodev_sym_session *sess = _sess;
struct rte_cryptodev *dev = opaque_arg;
memset(sess, 0, mp->elt_size);
@@ -930,8 +932,8 @@ rte_crypto_session_init(struct rte_mempool *mp,
}
static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
- unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id)
{
char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned priv_sess_size;
@@ -951,7 +953,7 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return -ENOMEM;
}
- unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+ unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
priv_sess_size;
dev->data->session_pool = rte_mempool_lookup(mp_name);
@@ -975,7 +977,8 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
0, /* private data size */
NULL, /* obj initialization constructor */
NULL, /* obj initialization constructor arg */
- rte_crypto_session_init, /* obj constructor */
+ rte_cryptodev_sym_session_init,
+ /* obj constructor */
dev, /* obj constructor arg */
socket_id, /* socket id */
0); /* flags */
@@ -990,11 +993,12 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
return 0;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_session *sess;
+ struct rte_cryptodev_sym_session *sess;
void *_sess;
if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
@@ -1010,7 +1014,7 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return NULL;
}
- sess = (struct rte_cryptodev_session *)_sess;
+ sess = (struct rte_cryptodev_sym_session *)_sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1026,8 +1030,9 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
return sess;
}
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 0ccd1b8..f4b38c1 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,14 +57,14 @@ extern "C" {
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd")
/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd")
+/**< Intel QAT Symmetric Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
+ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
/* Logging Macros */
@@ -99,8 +99,11 @@ struct rte_cryptodev_info {
unsigned max_nb_queue_pairs;
/**< Maximum number of queues pairs supported by device. */
- unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device. */
+ } sym;
};
#define RTE_CRYPTODEV_DETACHED (0)
@@ -575,6 +578,23 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
}
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+ struct {
+ uint8_t dev_id;
+ /**< Device Id */
+ enum rte_cryptodev_type type;
+ /**< Crypto device type the session was created on */
+ struct rte_mempool *mp;
+ /**< Mempool session allocated from */
+ } __rte_aligned(8);
+ /**< Public symmetric session details */
+
+ char _private[0];
+ /**< Private session material */
+};
+
+
/**
* Initialise a session for symmetric cryptographic operations.
*
@@ -596,24 +616,24 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
* @return
* Pointer to the created session or NULL
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
- struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+ struct rte_crypto_sym_xform *xform);
/**
* Free the memory associated with a previously allocated session.
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
- * *rte_cryptodev_session_create*.
+ * *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
- struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *session);
#ifdef __cplusplus
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index a16d109..7d049ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -358,7 +358,7 @@ typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
* - On success returns a pointer to a rte_mempool
* - On failure returns a NULL pointer
*/
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
struct rte_cryptodev *dev, unsigned nb_objs,
unsigned obj_cache_size, int socket_id);
@@ -372,7 +372,7 @@ typedef int (*cryptodev_create_session_pool_t)(
* - On success returns the size of the session structure for device
* - On failure returns 0
*/
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
@@ -386,7 +386,7 @@ typedef unsigned (*cryptodev_get_session_private_size_t)(
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
void *session_private);
/**
@@ -400,14 +400,14 @@ typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
* - Returns private session structure on success.
* - Returns NULL on failure.
*/
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
/**
* Free Crypto session.
* @param session Cryptodev session structure to free
*/
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
@@ -436,13 +436,13 @@ struct rte_cryptodev_ops {
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return private session. */
- cryptodev_initialize_session_t session_initialize;
+ cryptodev_sym_initialize_session_t session_initialize;
/**< Initialization function for private session data */
- cryptodev_configure_session_t session_configure;
+ cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
- cryptodev_free_session_t session_clear;
+ cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
};
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..a46af6f 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -16,8 +16,8 @@ DPDK_2.2 {
rte_cryptodev_pmd_driver_register;
rte_cryptodev_pmd_release_device;
rte_cryptodev_pmd_virtual_dev_init;
- rte_cryptodev_session_create;
- rte_cryptodev_session_free;
+ rte_cryptodev_sym_session_create;
+ rte_cryptodev_sym_session_free;
rte_cryptodev_socket_id;
rte_cryptodev_start;
rte_cryptodev_stats_get;
@@ -29,4 +29,4 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_stop;
local: *;
-};
\ No newline at end of file
+};
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
index 77993b6..5ce6058 100644
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,7 +67,7 @@ extern "C" {
enum rte_mbuf_ol_op_type {
RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
/**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
+ RTE_PKTMBUF_OL_CRYPTO_SYM
/**< Crypto offload operation */
};
@@ -84,7 +84,7 @@ struct rte_mbuf_offload {
enum rte_mbuf_ol_op_type type; /**< offload type */
union {
- struct rte_crypto_op crypto; /**< Crypto operation */
+ struct rte_crypto_sym_op crypto; /**< Crypto operation */
} op;
};
@@ -194,8 +194,8 @@ __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
ol->type = type;
switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
+ case RTE_PKTMBUF_OL_CRYPTO_SYM:
+ __rte_crypto_sym_op_reset(&ol->op.crypto); break;
default:
break;
}
@@ -278,24 +278,24 @@ __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
* - On success returns pointer to first crypto xform in crypto operations chain
* - On failure returns NULL
*/
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
unsigned nb_xforms)
{
- struct rte_crypto_xform *xform;
+ struct rte_crypto_sym_xform *xform;
void *priv_data;
uint16_t size;
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
if (priv_data == NULL)
return NULL;
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+ ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
} while (xform);
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v9 3/5] cryptodev: remove unused phys_addr field from key
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
` (3 preceding siblings ...)
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 2/5] cryptodev: refactor to partition common from symmetric-specific code Fiona Trahe
@ 2016-03-10 15:41 ` Fiona Trahe
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 4/5] cryptodev: change burst API to be crypto op oriented Fiona Trahe
` (2 subsequent siblings)
7 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 15:41 UTC (permalink / raw)
To: dev
Remove unused phys_addr field from key in crypto_xform, simplify struct
and fix knock-on impacts in l2fwd-crypto app
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
examples/l2fwd-crypto/main.c | 42 ++++++++++++++++++++++++++---------
lib/librte_cryptodev/rte_crypto_sym.h | 16 ++++++-------
2 files changed, 39 insertions(+), 19 deletions(-)
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index ee519e7..9b6b7ef 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -112,6 +112,12 @@ enum l2fwd_crypto_xform_chain {
L2FWD_CRYPTO_HASH_CIPHER
};
+struct l2fwd_key {
+ uint8_t *data;
+ uint32_t length;
+ phys_addr_t phys_addr;
+};
+
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -127,7 +133,7 @@ struct l2fwd_crypto_options {
struct rte_crypto_sym_xform cipher_xform;
uint8_t ckey_data[32];
- struct rte_crypto_key iv_key;
+ struct l2fwd_key iv_key;
uint8_t ivkey_data[16];
struct rte_crypto_sym_xform auth_xform;
@@ -141,7 +147,7 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct rte_crypto_key iv_key;
+ struct l2fwd_key iv_key;
struct rte_cryptodev_sym_session *session;
};
@@ -744,7 +750,7 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
/** Parse crypto key command line argument */
static int
-parse_key(struct rte_crypto_key *key __rte_unused,
+parse_key(struct l2fwd_key *key __rte_unused,
unsigned length __rte_unused, char *arg __rte_unused)
{
printf("Currently an unsupported argument!\n");
@@ -820,11 +826,18 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_cipher_op(&options->cipher_xform.cipher.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "cipher_key") == 0)
- return parse_key(&options->cipher_xform.cipher.key,
- sizeof(options->ckey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->ckey_data), optarg);
+
+ options->cipher_xform.cipher.key.data = key.data;
+ options->cipher_xform.cipher.key.length = key.length;
- else if (strcmp(lgopts[option_index].name, "iv") == 0)
+ return retval;
+
+ } else if (strcmp(lgopts[option_index].name, "iv") == 0)
return parse_key(&options->iv_key, sizeof(options->ivkey_data),
optarg);
@@ -837,11 +850,18 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_auth_op(&options->auth_xform.auth.op,
optarg);
- else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
- return parse_key(&options->auth_xform.auth.key,
- sizeof(options->akey_data), optarg);
+ else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
+ struct l2fwd_key key = { 0 };
+ int retval = 0;
+
+ retval = parse_key(&key, sizeof(options->akey_data), optarg);
+
+ options->auth_xform.auth.key.data = key.data;
+ options->auth_xform.auth.key.length = key.length;
+
+ return retval;
- else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
+ } else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
options->sessionless = 1;
return 0;
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index de6c701..270510e 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -111,12 +111,6 @@ enum rte_crypto_cipher_operation {
/**< Decrypt cipher operation */
};
-/** Crypto key structure */
-struct rte_crypto_key {
- uint8_t *data; /**< pointer to key data */
- phys_addr_t phys_addr;
- size_t length; /**< key length in bytes */
-};
/**
* Symmetric Cipher Setup Data.
@@ -133,7 +127,10 @@ struct rte_crypto_cipher_xform {
enum rte_crypto_cipher_algorithm algo;
/**< Cipher algorithm */
- struct rte_crypto_key key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
/**< Cipher key
*
* For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
@@ -257,7 +254,10 @@ struct rte_crypto_auth_xform {
enum rte_crypto_auth_algorithm algo;
/**< Authentication algorithm selection */
- struct rte_crypto_key key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } key;
/**< Authentication key data.
* The authentication key length MUST be less than or equal to the
* block size of the algorithm. It is the callers responsibility to
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v9 4/5] cryptodev: change burst API to be crypto op oriented
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
` (4 preceding siblings ...)
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 3/5] cryptodev: remove unused phys_addr field from key Fiona Trahe
@ 2016-03-10 15:41 ` Fiona Trahe
2016-03-10 16:03 ` Thomas Monjalon
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 5/5] mbuf_offload: remove library Fiona Trahe
2016-03-14 8:59 ` [dpdk-dev] [PATCH v8 0/5] cryptodev API changes Cao, Min
7 siblings, 1 reply; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 15:41 UTC (permalink / raw)
To: dev
From: Declan Doherty <declan.doherty@intel.com>
This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts
of rte_crypto_op's rather than the current implementation, which operates on
rte_mbuf bursts. This simplifies the burst processing in the crypto PMDs and the
use of crypto operations in general, and includes new functions for managing
rte_crypto_op pools.
These changes continue the separation of the symmetric operation parameters
from the more general operation parameters, which will simplify the integration
of asymmetric crypto operations in the future.
PMDs, unit tests and sample applications are also modified to work with the
modified and new API.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
app/test/test_cryptodev.c | 640 +++++++++++----------
app/test/test_cryptodev_perf.c | 221 ++++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 133 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 12 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 4 +-
drivers/crypto/qat/qat_crypto.c | 132 +++--
drivers/crypto/qat/qat_crypto.h | 12 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 4 +-
examples/l2fwd-crypto/main.c | 164 +++---
lib/librte_cryptodev/rte_crypto.h | 352 +++++++++++-
lib/librte_cryptodev/rte_crypto_sym.h | 374 ++++++------
lib/librte_cryptodev/rte_cryptodev.c | 76 +++
lib/librte_cryptodev/rte_cryptodev.h | 108 ++--
lib/librte_cryptodev/rte_cryptodev_version.map | 3 +-
14 files changed, 1397 insertions(+), 838 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 5ced183..55367df 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -48,7 +48,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,8 +62,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_mbuf_offload *ol;
- struct rte_crypto_sym_op *op;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -112,19 +111,21 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
#if HEX_DUMP
hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
#if HEX_DUMP
@@ -132,7 +133,7 @@ process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -162,13 +163,14 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
sizeof(struct rte_crypto_sym_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -253,10 +255,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -326,8 +327,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -793,53 +794,59 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ /* Set crypto operation authentication parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ /* Set crypto operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -864,60 +871,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
-
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -986,42 +999,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* attach symmetric crypto session to crypto operations */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1089,47 +1108,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1198,42 +1221,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1312,43 +1340,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1448,43 +1479,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
- /* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1522,10 +1556,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
catch_22_quote, QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
@@ -1546,54 +1576,71 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
/* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0],
+ &ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Set operation cipher parameters */
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ /* Set operation authentication parameters */
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->m_src,
+ CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ rte_pktmbuf_mtod_offset(
+ ut_params->op->sym->m_src, uint8_t *,
+ CIPHER_IV_LENGTH_AES_CBC +
+ QUOTE_512_BYTES),
catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
DIGEST_BYTE_LENGTH_AES_XCBC,
"Generated digest data not as expected");
@@ -1612,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES, 0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC);
-
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
@@ -1646,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- /* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf, QUOTE_512_BYTES);
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+ rte_memcpy(sym_op->auth.digest.data,
+ catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
+ DIGEST_BYTE_LENGTH_AES_XCBC);
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym->m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1835,50 +1894,53 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+ sym_op->m_dst = dst_m;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
-
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index b0c8abf..b43f9aa 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -50,7 +50,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -68,8 +68,7 @@ struct crypto_unittest_params {
struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_op *op;
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +78,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +91,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
@@ -113,23 +112,24 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -256,8 +256,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1698,11 +1698,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1739,46 +1740,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ TEST_ASSERT_NOT_NULL(ut_params->digest,
+ "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[0].length;
+
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +1794,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1814,9 +1819,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1831,9 +1835,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_cryptodev_enqueue_burst(dev_num, 0,
NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1847,16 +1850,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1870,7 +1866,10 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
double throughput, mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1919,63 +1918,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+ TEST_ASSERT_NOT_NULL(ut_params->digest
+ , "no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
+
+ struct rte_crypto_op *op = rte_crypto_op_alloc(
+ ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+ rte_crypto_op_attach_sym_session(op, ut_params->sess);
- struct rte_crypto_sym_op *cop = &ol->op.crypto;
+ op->sym->auth.digest.data = ut_params->digest;
+ op->sym->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+ op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.length = data_params[index].length;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym->cipher.iv.data = (uint8_t *)
+ rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym->m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num,
- 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
- < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
- DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(dev_num,
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -1987,9 +1993,9 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
rte_cryptodev_enqueue_burst(dev_num, 0,
NULL, 0);
- burst_received =
- rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
@@ -2006,15 +2012,8 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym->m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f39ebd5..f28b29f 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -296,16 +296,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
void *_sess = NULL;
@@ -316,7 +316,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0)) {
+ sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -338,12 +338,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_sym_op *c_op,
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -372,10 +374,26 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL)
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
if (job->auth_tag_output == NULL) {
@@ -388,7 +406,7 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
sizeof(get_digest_byte_length(job->hash_alg)));
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
@@ -399,26 +417,22 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
@@ -433,43 +447,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_sym_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
}
- return m;
+ return op;
}
/**
@@ -485,17 +497,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_ops, (void *)op);
else
qp->stats.dequeue_err_count++;
-
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -503,11 +514,9 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
struct aesni_mb_qp *qp = queue_pair;
@@ -515,21 +524,23 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i],
- RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
+ for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+ if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ MB_LOG_ERR("PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", ops[i]);
qp->stats.enqueue_err_count++;
goto flush_jobs;
}
-
- sess = get_session(qp, &ol->op.crypto);
+#endif
+ sess = get_session(qp, ops[i]);
if (unlikely(sess == NULL)) {
qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(qp, ops[i], sess);
if (unlikely(job == NULL)) {
qp->stats.enqueue_err_count++;
goto flush_jobs;
@@ -565,15 +576,15 @@ flush_jobs:
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
struct aesni_mb_qp *qp = queue_pair;
unsigned nb_dequeued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops);
qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index d56de12..b1dd103 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -138,9 +138,9 @@ aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
return 0;
}
-/** Create a ring to place process packets on */
+/** Create a ring to place processed operations on */
static struct rte_ring *
-aesni_mb_pmd_qp_create_processed_pkts_ring(struct aesni_mb_qp *qp,
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
unsigned ring_size, int socket_id)
{
struct rte_ring *r;
@@ -148,12 +148,12 @@ aesni_mb_pmd_qp_create_processed_pkts_ring(struct aesni_mb_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (r->prod.size >= ring_size) {
- MB_LOG_INFO("Reusing existing ring %s for processed packets",
+ MB_LOG_INFO("Reusing existing ring %s for processed ops",
qp->name);
return r;
}
- MB_LOG_ERR("Unable to reuse existing ring %s for processed packets",
+ MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
qp->name);
return NULL;
}
@@ -189,9 +189,9 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->ops = &job_ops[internals->vector_mode];
- qp->processed_pkts = aesni_mb_pmd_qp_create_processed_pkts_ring(qp,
+ qp->processed_ops = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
qp_conf->nb_descriptors, socket_id);
- if (qp->processed_pkts == NULL)
+ if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
qp->sess_mp = dev->data->session_pool;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 0aed177..949d9a6 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -146,8 +146,8 @@ struct aesni_mb_qp {
/**< Vector mode dependent pointer table of the multi-buffer APIs */
MB_MGR mb_mgr;
/**< Multi-buffer instance */
- struct rte_ring *processed_pkts;
- /**< Ring for placing process packets */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing process operations */
struct rte_mempool *sess_mp;
/**< Session Mempool */
struct rte_cryptodev_stats stats;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e7b9027..11f7fb2 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -72,7 +72,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +275,16 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register uint32_t nb_ops_sent = 0;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,47 +295,44 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights;
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
-
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
- if (nb_pkts_sent == 0)
+ if (nb_ops_sent == 0)
return 0;
goto kick_tail;
}
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
- nb_pkts_sent++;
- cur_tx_pkt++;
+ nb_ops_sent++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
queue->hw_queue_number, tail);
queue->tail = tail;
- tmp_qp->stats.enqueued_count += nb_pkts_sent;
- return nb_pkts_sent;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ return nb_ops_sent;
}
uint16_t
-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -342,17 +340,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -361,9 +362,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -377,38 +377,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
- struct rte_mbuf_offload *ol;
-
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
return -EINVAL;
}
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+#endif
+ if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type
- != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
@@ -416,37 +414,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
* - always in place.
*/
qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
-
+ rte_pktmbuf_mtophys(op->sym->m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym->cipher.data.length;
+ cipher_param->cipher_offset = op->sym->cipher.data.offset;
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym->auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->auth_len = op->sym->auth.data.length;
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -457,9 +455,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
#endif
return 0;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index e9f71fe..9323383 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,12 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 85700fc..5e51aca 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 9b6b7ef..6aaa7c0 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -159,8 +164,8 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
@@ -180,7 +185,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -294,20 +299,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -315,7 +321,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -323,23 +330,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -377,43 +384,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_op_attach_sym_session(op, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym->auth.digest.length = cparams->digest_length;
+
+ op->sym->auth.data.offset = ipdata_offset;
+ op->sym->auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym->cipher.iv.data = cparams->iv_key.data;
+ op->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym->cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym->cipher.data.offset = ipdata_offset;
+ op->sym->cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym->m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -435,8 +442,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -445,7 +452,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -505,6 +512,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -565,12 +574,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -599,8 +608,6 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
-
portid = qconf->rx_port_list[i];
cparams = &port_cparams[i];
@@ -610,44 +617,49 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
port_statistics[portid].rx += nb_rx;
- /* Enqueue packets from Crypto device*/
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO_SYM);
+ if (nb_rx) {
/*
- * If we can't allocate a offload, then drop
+ * If we can't allocate crypto ops, then drop
* the rest of the burst and dequeue and
* process the packets to free offload structs
*/
- if (unlikely(ol == NULL)) {
- for (; j < nb_rx; j++) {
- rte_pktmbuf_free(pkts_burst[j]);
- port_statistics[portid].dropped++;
- }
- break;
+ if (rte_crypto_op_bulk_alloc(
+ l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, nb_rx) !=
+ nb_rx) {
+ for (j = 0; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+
+ nb_rx = 0;
}
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ /* Enqueue packets from Crypto device*/
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m,
+ ops_burst[j], cparams);
+ }
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
-
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
+
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym->m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
@@ -1384,15 +1396,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 620c00b..5bc3eaa 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -44,23 +44,369 @@
extern "C" {
#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_crypto_sym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ /**< Undefined operation type */
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ /**< Symmetric operation */
+};
+
/** Status of crypto operation */
enum rte_crypto_op_status {
RTE_CRYPTO_OP_STATUS_SUCCESS,
/**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
- /**< Operation not yet submitted to a cryptodev */
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by a crypto device */
RTE_CRYPTO_OP_STATUS_ENQUEUED,
/**< Operation is enqueued on device */
RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
/**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**<
+ * Symmetric operation failed due to invalid session arguments, or if
+ * in session-less mode, failed to allocate private operation material.
+ */
RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
/**< Operation failed due to invalid arguments in request */
RTE_CRYPTO_OP_STATUS_ERROR,
/**< Error handling operation */
};
-#include <rte_crypto_sym.h>
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API. PMDs should check the type parameter to
+ * verify that the operation is a supported function of the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() functions.
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+ * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+
+ struct rte_mempool *mempool;
+ /**< crypto operation mempool which operation is allocated from */
+
+ phys_addr_t phys_addr;
+ /**< physical address of crypto operation */
+
+ void *opaque_data;
+ /**< Opaque pointer for user data */
+
+ union {
+ struct rte_crypto_sym_op *sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+} __rte_cache_aligned;
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->type = type;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ /** Symmetric operation structure starts after the end of the
+ * rte_crypto_op structure.
+ */
+ op->sym = (struct rte_crypto_sym_op *)(op + 1);
+ op->type = type;
+
+ __rte_crypto_sym_op_reset(op->sym);
+ break;
+ default:
+ break;
+ }
+
+ op->opaque_data = NULL;
+}
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ /**< Crypto operation type of the pool. */
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ (struct rte_crypto_op_pool_private *) rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Bulk allocate raw elements from mempool and return them as crypto operations
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns number of ops allocated; 0 on allocation failure,
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ priv = (struct rte_crypto_op_pool_private *) rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != type &&
+ priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+ return -EINVAL;
+
+ if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+ return nb_ops;
+
+ return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = NULL;
+ int retval;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+ if (unlikely(retval != 1))
+ return NULL;
+
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns *nb_ops*, the number of operations allocated
+ * - On failure returns 0
+ */
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+ != nb_ops))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_crypto_op_reset(ops[i], type);
+
+ return nb_ops;
+}
+
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+
+ return NULL;
+}
+
+/**
+ * Free a crypto operation structure.
+ * If the operation has been allocated from a rte_mempool, then the operation
+ * will be returned to the mempool.
+ *
+ * @param op crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf which is associated with the crypto operation, the
+ * operation will be allocated in the private data of that
+ * mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /*
+ * check that the mbuf's private data size is sufficient to contain a
+ * crypto operation
+ */
+ if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op))))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym->m_src = m;
+
+ return op;
+}
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ void *priv_data;
+ uint32_t size;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return NULL;
+
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+ nb_xforms);
+}
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param sess cryptodev session
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -1;
+
+ return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 270510e..831bbf1 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -46,6 +46,8 @@
extern "C" {
#endif
+#include <string.h>
+
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -111,7 +113,6 @@ enum rte_crypto_cipher_operation {
/**< Decrypt cipher operation */
};
-
/**
* Symmetric Cipher Setup Data.
*
@@ -128,8 +129,8 @@ struct rte_crypto_cipher_xform {
/**< Cipher algorithm */
struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
} key;
/**< Cipher key
*
@@ -255,8 +256,8 @@ struct rte_crypto_auth_xform {
/**< Authentication algorithm selection */
struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
} key;
/**< Authentication key data.
* The authentication key length MUST be less than or equal to the
@@ -347,21 +348,24 @@ enum rte_crypto_sym_op_sess_type {
};
+struct rte_cryptodev_sym_session;
+
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
- * call for performing cipher, hash, or a combined hash and cipher operations.
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
+ *
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
+ * which contains the source data which the crypto operation is to be performed
+ * on.
*/
struct rte_crypto_sym_op {
- enum rte_crypto_sym_op_sess_type type;
- enum rte_crypto_op_status status;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ enum rte_crypto_sym_op_sess_type type;
union {
struct rte_cryptodev_sym_session *session;
@@ -372,7 +376,7 @@ struct rte_crypto_sym_op {
struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -380,7 +384,7 @@ struct rte_crypto_sym_op {
* this location.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -399,17 +403,68 @@ struct rte_crypto_sym_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint32_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The @ref aad field
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -417,180 +472,169 @@ struct rte_crypto_sym_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint32_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+ * ignored. The @ref aad field should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_sym_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
-
- void *user_data;
- /**< opaque pointer for user data */
-};
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+ * rte_cryptodev_sym_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint16_t length; /**< Length of digest */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
+} __rte_cache_aligned;
/**
- * Reset the fields of a crypto operation to their default values.
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
+ memset(op, 0, sizeof(*op));
+
op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
}
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+ void *priv_data, uint8_t nb_xforms)
+{
+ struct rte_crypto_sym_xform *xform;
+
+ sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param op crypto operation
+ * @param sess cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
struct rte_cryptodev_sym_session *sess)
{
- op->session = sess;
- op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+ sym_op->session = sess;
+ sym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+ return 0;
}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index c7fef6a..4632ca3 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1056,3 +1056,79 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
return NULL;
}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ unsigned elt_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ priv_size;
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->priv_size < priv_size) {
+ mp = NULL;
+ CDEV_LOG_ERR("Mempool %s already exists but with "
+ "incompatible parameters", name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ CDEV_LOG_ERR("Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index f4b38c1..aab8cff 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -40,16 +40,14 @@
* Defines RTE Crypto Device APIs for the provisioning of cipher and
* authentication operations.
*
- * @warning
* @b EXPERIMENTAL: this API may change without prior notice
+ *
*/
#ifdef __cplusplus
extern "C" {
#endif
-#include "stddef.h"
-
#include "rte_crypto.h"
#include "rte_dev.h"
@@ -67,6 +65,9 @@ enum rte_cryptodev_type {
RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
};
+
+extern const char **rte_cyptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -214,8 +215,6 @@ struct rte_cryptodev_config {
/**
* Configure a device.
*
- * EXPERIMENTAL: this API file may change without prior notice
- *
* This function must be invoked first before any other function in the
* API. This function can also be re-invoked when a device is in the
* stopped state.
@@ -411,12 +410,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -489,66 +488,65 @@ struct rte_cryptodev_data {
extern struct rte_cryptodev *rte_cryptodevs;
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operation are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
/**
- * Enqueue a burst of packets for processing on a crypto device.
- *
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * Enqueue a burst of operations for processing on a crypto device.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all packets have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -556,25 +554,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto devices queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index a46af6f..6de6c7d 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -1,4 +1,4 @@
-DPDK_2.2 {
+DPDK_16.04 {
global:
rte_cryptodevs;
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
};
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* [dpdk-dev] [PATCH v9 5/5] mbuf_offload: remove library
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
` (5 preceding siblings ...)
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 4/5] cryptodev: change burst API to be crypto op oriented Fiona Trahe
@ 2016-03-10 15:41 ` Fiona Trahe
2016-03-14 8:59 ` [dpdk-dev] [PATCH v8 0/5] cryptodev API changes Cao, Min
7 siblings, 0 replies; 62+ messages in thread
From: Fiona Trahe @ 2016-03-10 15:41 UTC (permalink / raw)
To: dev
From: Declan Doherty <declan.doherty@intel.com>
As cryptodev library does not depend on mbuf_offload library
any longer, this patch removes it.
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 1 -
app/test/test_cryptodev_perf.c | 1 -
config/common_base | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 1 -
drivers/crypto/qat/qat_crypto.c | 1 -
examples/l2fwd-crypto/main.c | 1 -
lib/Makefile | 1 -
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 ----
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 -------
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 ---------------------
.../rte_mbuf_offload_version.map | 7 -
14 files changed, 493 deletions(-)
delete mode 100644 lib/librte_mbuf_offload/Makefile
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index e253bf7..421c317 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -223,10 +223,6 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 55367df..3240ecd 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index b43f9aa..b3f4fd9 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
diff --git a/config/common_base b/config/common_base
index c73f71a..0a02924 100644
--- a/config/common_base
+++ b/config/common_base
@@ -361,13 +361,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 7a91001..f626386 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f28b29f..9599cc4 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -37,7 +37,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 11f7fb2..3533f37 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 6aaa7c0..65e90b5 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
diff --git a/lib/Makefile b/lib/Makefile
index 6840f87..f254dba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index e39ad28..52f96c3 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
diff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile
deleted file mode 100644
index acdb449..0000000
--- a/lib/librte_mbuf_offload/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c
deleted file mode 100644
index 5c0c9dd..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
deleted file mode 100644
index 5ce6058..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operations
- * parameter's inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO_SYM
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_sym_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbug offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- (struct rte_pktmbuf_offload_pool_private *)rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success retruns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload strucutre to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO_SYM:
- __rte_crypto_sym_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_sym_xform *
-rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_sym_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
deleted file mode 100644
index 3d3b06a..0000000
--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v9 4/5] cryptodev: change burst API to be crypto op oriented
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 4/5] cryptodev: change burst API to be crypto op oriented Fiona Trahe
@ 2016-03-10 16:03 ` Thomas Monjalon
2016-03-10 16:13 ` Trahe, Fiona
0 siblings, 1 reply; 62+ messages in thread
From: Thomas Monjalon @ 2016-03-10 16:03 UTC (permalink / raw)
To: Fiona Trahe; +Cc: dev
2016-03-10 15:41, Fiona Trahe:
> lib/librte_cryptodev/rte_crypto_sym.h | 374 ++++++------
Sorry there is another issue:
rte_crypto_sym.h:622: warning: argument 'op' of command @param is not found in the argument list of __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op, struct rte_cryptodev_sym_session *sess)
rte_crypto_sym.h:628: warning: The following parameters of __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op, struct rte_cryptodev_sym_session *sess) are not documented:
parameter 'sym_op'
rte_crypto_sym.h:592: warning: unable to resolve reference to `rte_cryptodev_session_create' for \ref command
I've fixed it with the following changes:
* The length of the data pointed to by this field is
* set up for the session in the @ref
* rte_crypto_auth_xform structure as part of the @ref
- * rte_cryptodev_session_create function call. This
- * length must not exceed 240 bytes.
+ * rte_cryptodev_sym_session_create function call.
+ * This length must not exceed 240 bytes.
/**
* Attach a session to a symmetric crypto operation
*
- * @param op crypto operation
+ * @param sym_op crypto operation
* @param sess cryptodev session
*/
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v9 4/5] cryptodev: change burst API to be crypto op oriented
2016-03-10 16:03 ` Thomas Monjalon
@ 2016-03-10 16:13 ` Trahe, Fiona
0 siblings, 0 replies; 62+ messages in thread
From: Trahe, Fiona @ 2016-03-10 16:13 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: dev
HI Thomas,
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> Sent: Thursday, March 10, 2016 4:03 PM
> To: Trahe, Fiona
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v9 4/5] cryptodev: change burst API to be crypto
> op oriented
>
> 2016-03-10 15:41, Fiona Trahe:
> > lib/librte_cryptodev/rte_crypto_sym.h | 374 ++++++------
>
> Sorry there is another issue:
>
> rte_crypto_sym.h:622: warning: argument 'op' of command @param is not
> found in the argument list of __rte_crypto_sym_op_attach_sym_session(struct
> rte_crypto_sym_op *sym_op, struct rte_cryptodev_sym_session *sess)
> rte_crypto_sym.h:628: warning: The following parameters of
> __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
> struct rte_cryptodev_sym_session *sess) are not documented:
> parameter 'sym_op'
> rte_crypto_sym.h:592: warning: unable to resolve reference to
> `rte_cryptodev_session_create' for \ref command
>
> I've fixed it with the following changes:
>
> * The length of the data pointed to by this field is
> * set up for the session in the @ref
> * rte_crypto_auth_xform structure as part of the @ref
> - * rte_cryptodev_session_create function call. This
> - * length must not exceed 240 bytes.
> + * rte_cryptodev_sym_session_create function call.
> + * This length must not exceed 240 bytes.
>
> /**
> * Attach a session to a symmetric crypto operation
> *
> - * @param op crypto operation
> + * @param sym_op crypto operation
> * @param sess cryptodev session
> */
This is great fun :)
Do you need me to respin a v10 with that fix?
Or have you applied with the fix?
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v9 0/5] cryptodev API changes
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 " Fiona Trahe
@ 2016-03-10 16:14 ` Thomas Monjalon
0 siblings, 0 replies; 62+ messages in thread
From: Thomas Monjalon @ 2016-03-10 16:14 UTC (permalink / raw)
To: Fiona Trahe, Pablo de Lara, declan.doherty; +Cc: dev
> Declan Doherty (2):
> cryptodev: change burst API to be crypto op oriented
> mbuf_offload: remove library
>
> Fiona Trahe (3):
> cryptodev: code cleanup
> cryptodev: refactor to partition common from symmetric-specific code
> cryptodev: remove unused phys_addr field from key
Applied, thanks for the big work
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v8 0/5] cryptodev API changes
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
` (6 preceding siblings ...)
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 5/5] mbuf_offload: remove library Fiona Trahe
@ 2016-03-14 8:59 ` Cao, Min
7 siblings, 0 replies; 62+ messages in thread
From: Cao, Min @ 2016-03-14 8:59 UTC (permalink / raw)
To: Trahe, Fiona, dev
Tested-by: Min Cao <min.cao@intel.com>
- Tested Commit: 4c387fcdf7776d3bb9f47789580a57bd18d4dbf3
- OS: Fedora20 3.11.10-301.fc20.x86_64
- GCC: gcc (GCC) 4.8.3
- CPU: Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
- NIC: Niantic
- Default x86_64-native-linuxapp-gcc configuration
- Prerequisites:
- Total 23 cases, 23 passed, 0 failed
- test case 1: QAT Unit test
Total 13 cases, 13 passed, 0 failed
- test case 2: AES_NI Unit test
Total 10 cases, 10 passed, 0 failed
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Fiona Trahe
Sent: Thursday, March 10, 2016 9:43 PM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v8 0/5] cryptodev API changes
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
v8:
- split patchset for easier review
- fix broken /examples/l2fwd-crypto build in intermediate patch
- split removal of rte_mbuf_offload into separate commit
v7:
- remove trailing spaces introduced in v6
- rebase against recent config file changes
v6:
- restore EXPERIMENTAL label to cryptodev. Will handle removal in separate thread.
(email subject was incorrect in v5, so v5 hasn't arrived in patchwork, therefore v6 is in-reply-to v4 message id)
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (2):
cryptodev: change burst API to be crypto op oriented
mbuf_offload: remove library
Fiona Trahe (3):
cryptodev: code cleanup
cryptodev: refactor to partition common from symmetric-specific code
cryptodev: remove unused phys_addr field from key
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 883 +++++++++++----------
app/test/test_cryptodev.h | 5 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_base | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 190 +++--
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 30 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 10 +-
drivers/crypto/qat/qat_crypto.c | 160 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 234 +++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 ++++++++-----------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 --------
.../rte_mbuf_offload_version.map | 7 -
26 files changed, 2130 insertions(+), 1967 deletions(-) create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/2] cryptodev API changes
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
` (7 preceding siblings ...)
2016-03-10 13:43 ` [dpdk-dev] [PATCH v8 5/5] mbuf_offload: remove library Fiona Trahe
@ 2016-03-15 5:21 ` Cao, Min
8 siblings, 0 replies; 62+ messages in thread
From: Cao, Min @ 2016-03-15 5:21 UTC (permalink / raw)
To: Trahe, Fiona, dev
Tested-by: Min Cao <min.cao@intel.com>
- Tested Commit: e702183bab5a13f6e7d77be0d414914319469020
- OS: Fedora20 3.11.10-301.fc20.x86_64
- GCC: gcc (GCC) 4.8.3
- CPU: Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
- NIC: Niantic
- Default x86_64-native-linuxapp-gcc configuration
- Prerequisites:
- Total 24 cases, 24 passed, 0 failed
- test case 1: QAT Unit test
Total 13 cases, 13 passed, 0 failed
- test case 2: AES_NI Unit test
Total 10 cases, 10 passed, 0 failed
- test case 3: l2fwd-crypto
Total 1 cases, 1 passed, 0 failed
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Fiona Trahe
Sent: Monday, March 07, 2016 7:50 PM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v7 0/2] cryptodev API changes
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
v7:
- remove trailing spaces introduced in v6
- rebase against recent config file changes
v6:
- restore EXPERIMENTAL label to cryptodev. Will handle removal in separate thread.
(email subject was incorrect in v5, so v5 hasn't arrived in patchwork, therefore v6 is in-reply-to v4 message id)
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (2):
cryptodev: API tidy and changes to support future extensions
cryptodev: change burst API to be crypto op oriented
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_base | 7 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
26 files changed, 2145 insertions(+), 2016 deletions(-) create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v6 0/2] cryptodev API changes
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Fiona Trahe
` (2 preceding siblings ...)
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto op oriented Fiona Trahe
@ 2016-03-15 6:46 ` Cao, Min
3 siblings, 0 replies; 62+ messages in thread
From: Cao, Min @ 2016-03-15 6:46 UTC (permalink / raw)
To: Trahe, Fiona, dev
Tested-by: Min Cao <min.cao@intel.com>
- Tested Commit: e45ef10c34540c3f543689d833db8bb7296d9e85
- OS: Fedora20 3.11.10-301.fc20.x86_64
- GCC: gcc (GCC) 4.8.3
- CPU: Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
- NIC: Niantic
- Default x86_64-native-linuxapp-gcc configuration
- Prerequisites:
- Total 24 cases, 24 passed, 0 failed
- test case 1: QAT Unit test
Total 13 cases, 13 passed, 0 failed
- test case 2: AES_NI Unit test
Total 10 cases, 10 passed, 0 failed
- test case 3: l2fwd-crypto
Total 1 cases, 1 passed, 0 failed
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Fiona Trahe
Sent: Saturday, March 05, 2016 2:30 AM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v6 0/2] cryptodev API changes
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
v6:
- restore EXPERIMENTAL label to cryptodev. Will handle removal in separate thread.
(email subject was incorrect in v5, so v5 hasn't arrived in patchwork, therefore v6 is in-reply-to v4 message id)
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 9 +-
config/common_linuxapp | 9 +-
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 191 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2148 insertions(+), 2026 deletions(-) create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 " Fiona Trahe
2016-03-04 17:38 ` Thomas Monjalon
2016-03-04 17:39 ` Trahe, Fiona
@ 2016-03-15 6:48 ` Cao, Min
2 siblings, 0 replies; 62+ messages in thread
From: Cao, Min @ 2016-03-15 6:48 UTC (permalink / raw)
To: Trahe, Fiona, dev
Tested-by: Min Cao <min.cao@intel.com>
- Tested Commit: e45ef10c34540c3f543689d833db8bb7296d9e85
- OS: Fedora20 3.11.10-301.fc20.x86_64
- GCC: gcc (GCC) 4.8.3
- CPU: Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
- NIC: Niantic
- Default x86_64-native-linuxapp-gcc configuration
- Prerequisites:
- Total 23 cases, 23 passed, 0 failed
- test case 1: QAT Unit test
Total 13 cases, 13 passed, 0 failed
- test case 2: AES_NI Unit test
Total 10 cases, 10 passed, 0 failed
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Fiona Trahe
Sent: Saturday, March 05, 2016 1:18 AM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v5 0/2] cryptodev API changes
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
V5:
- updates .map file
- removes EXPERIMENTAL label from rte_cryptodev.h
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 6 +-
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 8 -
config/common_linuxapp | 8 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 189 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 7 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2145 insertions(+), 2027 deletions(-) create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.1.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v4 0/2] cryptodev API changes
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
` (8 preceding siblings ...)
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 2/2] cryptodev: change burst API to be crypto op oriented Fiona Trahe
@ 2016-03-15 6:57 ` Cao, Min
9 siblings, 0 replies; 62+ messages in thread
From: Cao, Min @ 2016-03-15 6:57 UTC (permalink / raw)
To: Doherty, Declan, dev
Tested-by: Min Cao <min.cao@intel.com>
- Tested Commit: e45ef10c34540c3f543689d833db8bb7296d9e85
- OS: Fedora20 3.11.10-301.fc20.x86_64
- GCC: gcc (GCC) 4.8.3
- CPU: Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
- NIC: Niantic
- Default x86_64-native-linuxapp-gcc configuration
- Prerequisites:
- Total 24 cases, 24 passed, 0 failed
- test case 1: QAT Unit test
Total 13 cases, 13 passed, 0 failed
- test case 2: AES_NI Unit test
Total 10 cases, 10 passed, 0 failed
- test case 3: l2fwd-crypto
Total 1 cases, 1 passed, 0 failed
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Declan Doherty
Sent: Tuesday, March 01, 2016 12:52 AM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v4 0/2] cryptodev API changes
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
V4:
- Fixes for issues introduced in __rte_crypto_op_raw_bulk_alloc in V3 patchset.
- Typo fix in cached attribute on rte_crypto_op structure.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 6 +-
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 8 -
config/common_linuxapp | 8 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 819 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 185 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 3 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2143 insertions(+), 2021 deletions(-) create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v3 0/2] cryptodev API changes
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
` (3 preceding siblings ...)
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
@ 2016-03-15 7:07 ` Cao, Min
4 siblings, 0 replies; 62+ messages in thread
From: Cao, Min @ 2016-03-15 7:07 UTC (permalink / raw)
To: Doherty, Declan, dev
Tested-by: Min Cao <min.cao@intel.com>
- Tested Commit: e45ef10c34540c3f543689d833db8bb7296d9e85
- OS: Fedora20 3.11.10-301.fc20.x86_64
- GCC: gcc (GCC) 4.8.3
- CPU: Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
- NIC: Niantic
- Default x86_64-native-linuxapp-gcc configuration
- Prerequisites:
- Total 24 cases, 24 passed, 0 failed
- test case 1: QAT Unit test
Total 13 cases, 13 passed, 0 failed
- test case 2: AES_NI Unit test
Total 10 cases, 10 passed, 0 failed
- test case 3: l2fwd-crypto
Total 1 cases, 1 passed, 0 failed
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Declan Doherty
Sent: Saturday, February 27, 2016 1:30 AM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v3 0/2] cryptodev API changes
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
V3:
- Addresses V2 comments
- Rebased for head
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 6 +-
app/test/test_cryptodev.c | 894 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 8 -
config/common_linuxapp | 8 -
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 150 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 300 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 822 ++++++++-----------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 115 ++-
lib/librte_cryptodev/rte_cryptodev.h | 185 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 3 +-
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 310 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2146 insertions(+), 2021 deletions(-) create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [dpdk-dev] [PATCH v2 0/2] cryptodev API changes
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 0/2] cryptodev API changes Declan Doherty
` (2 preceding siblings ...)
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
@ 2016-03-15 7:48 ` Cao, Min
3 siblings, 0 replies; 62+ messages in thread
From: Cao, Min @ 2016-03-15 7:48 UTC (permalink / raw)
To: Doherty, Declan, dev
Tested-by: Min Cao <min.cao@intel.com>
- Tested Commit: e45ef10c34540c3f543689d833db8bb7296d9e85
- OS: Fedora20 3.11.10-301.fc20.x86_64
- GCC: gcc (GCC) 4.8.3
- CPU: Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
- NIC: Niantic
- Default x86_64-native-linuxapp-gcc configuration
- Prerequisites:
- Total 24 cases, 24 passed, 0 failed
- test case 1: QAT Unit test
Total 13 cases, 13 passed, 0 failed
- test case 2: AES_NI Unit test
Total 10 cases, 10 passed, 0 failed
- test case 3: l2fwd-crypto
Total 1 cases, 1 passed, 0 failed
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Declan Doherty
Sent: Friday, February 19, 2016 7:01 PM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 0/2] cryptodev API changes
This patch set separates the symmetric crypto operations from generic operations and then modifies the cryptodev burst API to accept bursts of rte_crypto_op rather than rte_mbufs.
This patch set is dependent on the following bug fix patches:
aesni_mb: strict-aliasing rule compilation fix
(http://dpdk.org/ml/archives/dev/2016-February/033193.html)
qat:fix build on 32-bit systems
(http://dpdk.org/ml/archives/dev/2016-February/033442.html)
aesni_mb: fix wrong return value
(http://dpdk.org/ml/archives/dev/2016-February/033193.html)
Various fixes for L2fwd-crypto
Declan Doherty (1):
cryptodev: change burst API to be crypto op oriented
Fiona Trahe (1):
cryptodev: API tidy and changes to support future extensions
MAINTAINERS | 4 -
app/test/test_cryptodev.c | 890 +++++++++++----------
app/test/test_cryptodev.h | 9 +-
app/test/test_cryptodev_perf.c | 270 ++++---
config/common_bsdapp | 7 -
config/common_linuxapp | 11 +-
doc/api/doxy-api-index.md | 1 -
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 199 ++---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 18 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 6 +-
drivers/crypto/qat/qat_crypto.c | 154 ++--
drivers/crypto/qat/qat_crypto.h | 14 +-
drivers/crypto/qat/rte_qat_cryptodev.c | 8 +-
examples/l2fwd-crypto/main.c | 281 ++++---
lib/Makefile | 1 -
lib/librte_cryptodev/Makefile | 1 +
lib/librte_cryptodev/rte_crypto.h | 802 +++++++------------
lib/librte_cryptodev/rte_crypto_sym.h | 642 +++++++++++++++
lib/librte_cryptodev/rte_cryptodev.c | 113 ++-
lib/librte_cryptodev/rte_cryptodev.h | 183 ++---
lib/librte_cryptodev/rte_cryptodev_pmd.h | 32 +-
lib/librte_cryptodev/rte_cryptodev_version.map | 1 +
lib/librte_mbuf/rte_mbuf.h | 6 -
lib/librte_mbuf_offload/Makefile | 52 --
lib/librte_mbuf_offload/rte_mbuf_offload.c | 100 ---
lib/librte_mbuf_offload/rte_mbuf_offload.h | 307 -------
.../rte_mbuf_offload_version.map | 7 -
27 files changed, 2114 insertions(+), 2005 deletions(-) create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
delete mode 100644 lib/librte_mbuf_offload/Makefile delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h
delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map
--
2.5.0
^ permalink raw reply [flat|nested] 62+ messages in thread
end of thread, other threads:[~2016-03-15 7:48 UTC | newest]
Thread overview: 62+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-01-30 13:07 [dpdk-dev] [PATCH] cryptodev: API change to rte_crypto_op bursts Declan Doherty
2016-02-08 17:50 ` Trahe, Fiona
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 0/2] cryptodev API changes Declan Doherty
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
2016-02-22 11:17 ` Trahe, Fiona
2016-02-22 18:23 ` Trahe, Fiona
2016-02-22 18:56 ` Trahe, Fiona
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
2016-02-26 17:30 ` [dpdk-dev] [PATCH v3 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
2016-02-29 16:00 ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 " Declan Doherty
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
2016-03-04 14:43 ` Thomas Monjalon
2016-02-29 16:52 ` [dpdk-dev] [PATCH v4 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
2016-02-29 17:47 ` [dpdk-dev] [PATCH v4 0/2] cryptodev API changes Trahe, Fiona
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 " Fiona Trahe
2016-03-04 17:38 ` Thomas Monjalon
2016-03-04 17:43 ` Trahe, Fiona
2016-03-04 17:45 ` Thomas Monjalon
2016-03-04 18:01 ` Trahe, Fiona
2016-03-04 17:39 ` Trahe, Fiona
2016-03-15 6:48 ` Cao, Min
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 1/2] This patch splits symmetric specific definitions and functions away from the common crypto APIs to facilitate the future extension and expansion of the cryptodev framework, in order to allow asymmetric crypto operations to be introduced at a later date, as well as to clean the logical structure of the public includes. The patch also introduces the _sym prefix to symmetric specific structure and functions to improve clarity in the API Fiona Trahe
2016-03-04 17:17 ` [dpdk-dev] [PATCH v5 2/2] This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts rte_crypto_op's rather than the current implementation which operates on rte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the use of crypto operations in general Fiona Trahe
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Fiona Trahe
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 " Fiona Trahe
2016-03-07 13:23 ` De Lara Guarch, Pablo
2016-03-07 13:53 ` Jain, Deepak K
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
2016-03-10 14:05 ` De Lara Guarch, Pablo
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 " Fiona Trahe
2016-03-10 16:14 ` Thomas Monjalon
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 1/5] cryptodev: code cleanup Fiona Trahe
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 2/5] cryptodev: refactor to partition common from symmetric-specific code Fiona Trahe
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 3/5] cryptodev: remove unused phys_addr field from key Fiona Trahe
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 4/5] cryptodev: change burst API to be crypto op oriented Fiona Trahe
2016-03-10 16:03 ` Thomas Monjalon
2016-03-10 16:13 ` Trahe, Fiona
2016-03-10 15:41 ` [dpdk-dev] [PATCH v9 5/5] mbuf_offload: remove library Fiona Trahe
2016-03-14 8:59 ` [dpdk-dev] [PATCH v8 0/5] cryptodev API changes Cao, Min
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 1/5] cryptodev: code cleanup Fiona Trahe
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 2/5] cryptodev: refactor to partition common from symmetric-specific code Fiona Trahe
2016-03-10 13:42 ` [dpdk-dev] [PATCH v8 3/5] cryptodev: remove unused phys_addr field from key Fiona Trahe
2016-03-10 13:43 ` [dpdk-dev] [PATCH v8 4/5] cryptodev: change burst API to be crypto op oriented Fiona Trahe
2016-03-10 14:03 ` Thomas Monjalon
2016-03-10 13:43 ` [dpdk-dev] [PATCH v8 5/5] mbuf_offload: remove library Fiona Trahe
2016-03-15 5:21 ` [dpdk-dev] [PATCH v7 0/2] cryptodev API changes Cao, Min
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 1/2] cryptodev: API tidy and changes to support future extensions Fiona Trahe
2016-03-08 14:10 ` Thomas Monjalon
2016-03-10 10:30 ` Trahe, Fiona
2016-03-07 11:50 ` [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto op oriented Fiona Trahe
2016-03-08 14:32 ` Thomas Monjalon
2016-03-09 12:55 ` Trahe, Fiona
2016-03-10 10:28 ` Trahe, Fiona
2016-03-15 6:46 ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Cao, Min
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 1/2] cryptodev: API tidy and changes to support future extensions Fiona Trahe
2016-03-04 18:29 ` [dpdk-dev] [PATCH v6 2/2] cryptodev: change burst API to be crypto op oriented Fiona Trahe
2016-03-15 6:57 ` [dpdk-dev] [PATCH v4 0/2] cryptodev API changes Cao, Min
2016-03-15 7:07 ` [dpdk-dev] [PATCH v3 " Cao, Min
2016-03-15 7:48 ` [dpdk-dev] [PATCH v2 " Cao, Min
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).