From: Fiona Trahe <fiona.trahe@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v8 4/5] cryptodev: change burst API to be crypto op oriented
Date: Thu, 10 Mar 2016 13:43:00 +0000
Message-ID: <1457617381-8296-5-git-send-email-fiona.trahe@intel.com>
In-Reply-To: <1457351422-7617-1-git-send-email-fiona.trahe@intel.com>

From: Declan Doherty <declan.doherty@intel.com>

This patch modifies the crypto burst enqueue/dequeue APIs to operate on
bursts of rte_crypto_op structures rather than on rte_mbuf bursts, as
the current implementation does. This simplifies burst processing in
the crypto PMDs and the use of crypto operations in general, and new
functions are added for managing rte_crypto_op pools, as sketched
below.
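
For illustration, the enqueue/dequeue flow after this change looks
roughly as follows (a minimal sketch: the pool name, NB_OPS,
CACHE_SIZE, dev_id, sess and m are placeholders, not part of this
patch, and error handling is abridged):

    struct rte_mempool *op_pool = rte_crypto_op_pool_create(
            "SYM_OP_POOL", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
            NB_OPS, CACHE_SIZE,
            DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform),
            rte_socket_id());

    struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
            RTE_CRYPTO_OP_TYPE_SYMMETRIC);

    rte_crypto_op_attach_sym_session(op, sess);
    op->sym->m_src = m;     /* the op now carries the mbuf */

    /* burst APIs take rte_crypto_op pointers, not mbufs */
    if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1)
        rte_crypto_op_free(op);

    while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
        rte_pause();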

These changes continue the separation of the symmetric operation
parameters from the more general operation parameters, which will
simplify the integration of asymmetric crypto operations in the future.
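
As an example of the split, the cipher and auth data parameters move
from the top level of rte_crypto_sym_op into per-transform
sub-structures (iv_len and digest below stand in for application
values):

    /* before: parameters hang directly off the symmetric op */
    cop->data.to_cipher.offset = iv_len;
    cop->data.to_hash.offset   = iv_len;
    cop->digest.data           = digest;

    /* after: general fields (type, status) live in rte_crypto_op,
     * while symmetric parameters are grouped under op->sym */
    op->sym->cipher.data.offset = iv_len;
    op->sym->auth.data.offset   = iv_len;
    op->sym->auth.digest.data   = digest;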

The PMDs, unit tests and sample applications have also been updated to
work with the modified and new APIs; the session-less case is sketched
below.
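
For session-less operation, the transform chain is now allocated from
the private data area reserved when the op pool is created (a sketch;
op_pool is a placeholder and only the xform types are shown):

    struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
            RTE_CRYPTO_OP_TYPE_SYMMETRIC);

    /* reserve space for a two-element cipher+auth xform chain */
    if (rte_crypto_op_sym_xforms_alloc(op, 2) == NULL)
        return -ENOMEM;

    op->sym->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
    op->sym->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;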

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
 app/test/test_cryptodev.c                          | 640 +++++++++++----------
 app/test/test_cryptodev_perf.c                     | 221 ++++---
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         | 133 +++--
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |  12 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   4 +-
 drivers/crypto/qat/qat_crypto.c                    | 132 +++--
 drivers/crypto/qat/qat_crypto.h                    |  12 +-
 drivers/crypto/qat/rte_qat_cryptodev.c             |   4 +-
 examples/l2fwd-crypto/main.c                       | 164 +++---
 lib/librte_cryptodev/rte_crypto.h                  | 352 +++++++++++-
 lib/librte_cryptodev/rte_crypto_sym.h              | 374 ++++++------
 lib/librte_cryptodev/rte_cryptodev.c               |  76 +++
 lib/librte_cryptodev/rte_cryptodev.h               | 108 ++--
 lib/librte_cryptodev/rte_cryptodev_version.map     |   5 +-
 14 files changed, 1398 insertions(+), 839 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 5ced183..55367df 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -48,7 +48,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
 
 struct crypto_testsuite_params {
 	struct rte_mempool *mbuf_pool;
-	struct rte_mempool *mbuf_ol_pool;
+	struct rte_mempool *op_mpool;
 	struct rte_cryptodev_config conf;
 	struct rte_cryptodev_qp_conf qp_conf;
 
@@ -62,8 +62,7 @@ struct crypto_unittest_params {
 
 	struct rte_cryptodev_sym_session *sess;
 
-	struct rte_mbuf_offload *ol;
-	struct rte_crypto_sym_op *op;
+	struct rte_crypto_op *op;
 
 	struct rte_mbuf *obuf, *ibuf;
 
@@ -112,19 +111,21 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
 }
 #endif
 
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
 {
-	struct rte_mbuf *obuf = NULL;
 #if HEX_DUMP
 	hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
 #endif
 
-	if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
 		printf("Error sending packet for encryption");
 		return NULL;
 	}
-	while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+	op = NULL;
+
+	while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
 		rte_pause();
 
 #if HEX_DUMP
@@ -132,7 +133,7 @@ process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
 		hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
 #endif
 
-	return obuf;
+	return op;
 }
 
 static struct crypto_testsuite_params testsuite_params = { NULL };
@@ -162,13 +163,14 @@ testsuite_setup(void)
 		}
 	}
 
-	ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
-			"MBUF_OFFLOAD_POOL",
+	ts_params->op_mpool = rte_crypto_op_pool_create(
+			"MBUF_CRYPTO_SYM_OP_POOL",
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 			NUM_MBUFS, MBUF_CACHE_SIZE,
 			DEFAULT_NUM_XFORMS *
 			sizeof(struct rte_crypto_sym_xform),
 			rte_socket_id());
-	if (ts_params->mbuf_ol_pool == NULL) {
+	if (ts_params->op_mpool == NULL) {
 		RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
 		return TEST_FAILED;
 	}
@@ -253,10 +255,9 @@ testsuite_teardown(void)
 		rte_mempool_count(ts_params->mbuf_pool));
 	}
 
-
-	if (ts_params->mbuf_ol_pool != NULL) {
+	if (ts_params->op_mpool != NULL) {
 		RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
-		rte_mempool_count(ts_params->mbuf_ol_pool));
+		rte_mempool_count(ts_params->op_mpool));
 	}
 
 }
@@ -326,8 +327,8 @@ ut_teardown(void)
 	}
 
 	/* free crypto operation structure */
-	if (ut_params->ol)
-		rte_pktmbuf_offload_free(ut_params->ol);
+	if (ut_params->op)
+		rte_crypto_op_free(ut_params->op);
 
 	/*
 	 * free mbuf - both obuf and ibuf are usually the same,
@@ -793,53 +794,59 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
 			&ut_params->cipher_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
-	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
+	/* Generate crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
-	ut_params->op = &ut_params->ol->op.crypto;
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
 
-	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
 
-	ut_params->op->digest.data = ut_params->digest;
-	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
+
+	/* Set crypto operation authentication parameters */
+	sym_op->auth.digest.data = ut_params->digest;
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, QUOTE_512_BYTES);
-	ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
 
-	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
-			CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	/* Set crypto operation cipher parameters */
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
 			CIPHER_IV_LENGTH_AES_CBC);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+			CIPHER_IV_LENGTH_AES_CBC);
 
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
 
 	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
 
 	/* Validate obuf */
-	TEST_ASSERT_BUFFERS_ARE_EQUAL(
-			rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
-			CIPHER_IV_LENGTH_AES_CBC,
+	uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+			uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
 			catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
 			QUOTE_512_BYTES,
 			"ciphertext data not as expected");
 
-	TEST_ASSERT_BUFFERS_ARE_EQUAL(
-			rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
-			CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+	uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
 			catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
 			gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
 					TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -864,60 +871,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
 	TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
 
 	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
-	ut_params->op = &ut_params->ol->op.crypto;
-
-	TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
-			ut_params->ol, 2),
+	TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
 			"failed to allocate space for crypto transforms");
 
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
+
 	/* Set crypto operation data parameters */
-	ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 
 	/* cipher parameters */
-	ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-	ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
-	ut_params->op->xform->cipher.key.data = aes_cbc_key;
-	ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+	sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+	sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+	sym_op->xform->cipher.key.data = aes_cbc_key;
+	sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
 
 	/* hash parameters */
-	ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
 
-	ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
-	ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
-	ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
-	ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
-	ut_params->op->xform->next->auth.digest_length =
+	sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+	sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+	sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+	sym_op->xform->next->auth.key.data = hmac_sha1_key;
+	sym_op->xform->next->auth.digest_length =
 			DIGEST_BYTE_LENGTH_SHA1;
 
-	ut_params->op->digest.data = ut_params->digest;
-	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+	sym_op->auth.digest.data = ut_params->digest;
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, QUOTE_512_BYTES);
-	ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
 
-	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
-			CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
+
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
 			CIPHER_IV_LENGTH_AES_CBC);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+			CIPHER_IV_LENGTH_AES_CBC);
 
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
 
 	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	ut_params->obuf = ut_params->op->sym->m_src;
 
 	/* Validate obuf */
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -986,42 +999,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
-	ut_params->op = &ut_params->ol->op.crypto;
+	/* attach symmetric crypto session to crypto operations */
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
 
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
 
-	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
 
-	ut_params->op->digest.data = ut_params->digest;
-	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+	sym_op->auth.digest.data = ut_params->digest;
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, QUOTE_512_BYTES);
-	ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
 
-	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
-			CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
 			CIPHER_IV_LENGTH_AES_CBC);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+			CIPHER_IV_LENGTH_AES_CBC);
 
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
 
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
 
 	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	ut_params->obuf = ut_params->op->sym->m_src;
+
 
 	/* Validate obuf */
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1089,47 +1108,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
 
 	/* Create Crypto session*/
-	ut_params->sess =
-		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
-						&ut_params->cipher_xform);
+	ut_params->sess = rte_cryptodev_sym_session_create(
+			ts_params->valid_devs[0],
+			&ut_params->cipher_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
-	ut_params->op = &ut_params->ol->op.crypto;
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
 
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
 
-	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
 
-	ut_params->op->digest.data = ut_params->digest;
-	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+	sym_op->auth.digest.data = ut_params->digest;
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, QUOTE_512_BYTES);
-	ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
 
-	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
-			CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
 			CIPHER_IV_LENGTH_AES_CBC);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+			CIPHER_IV_LENGTH_AES_CBC);
 
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
 
 	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	ut_params->obuf = ut_params->op->sym->m_src;
 
 	/* Validate obuf */
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1198,42 +1221,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
-
-	ut_params->op = &ut_params->ol->op.crypto;
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
 
-	ut_params->op->digest.data = ut_params->digest;
-	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
+
+	sym_op->auth.digest.data = ut_params->digest;
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, QUOTE_512_BYTES);
-	ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
 
-	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
 			ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
 			CIPHER_IV_LENGTH_AES_CBC);
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
 
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+	/* Process crypto operation */
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
 
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
 
-	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	ut_params->obuf = ut_params->op->sym->m_src;
 
 	/* Validate obuf */
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1312,43 +1340,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
 
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
-
 	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
-	ut_params->op = &ut_params->ol->op.crypto;
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
 
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
 
-	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
 
-	ut_params->op->digest.data = ut_params->digest;
-	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+	sym_op->auth.digest.data = ut_params->digest;
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, QUOTE_512_BYTES);
-	ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
 
-	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
-			CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
 			CIPHER_IV_LENGTH_AES_CBC);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+			CIPHER_IV_LENGTH_AES_CBC);
 
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
 
 	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	ut_params->obuf = ut_params->op->sym->m_src;
 
 	/* Validate obuf */
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1448,43 +1479,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
 			DIGEST_BYTE_LENGTH_SHA512);
 
 	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
-	ut_params->op = &ut_params->ol->op.crypto;
+	rte_crypto_op_attach_sym_session(ut_params->op, sess);
 
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
 
-	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(ut_params->op, sess);
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
 
-	ut_params->op->digest.data = ut_params->digest;
-	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+	sym_op->auth.digest.data = ut_params->digest;
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, QUOTE_512_BYTES);
-	ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
 
-	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
+
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
 			ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, 0);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
 			CIPHER_IV_LENGTH_AES_CBC);
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
 
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+	/* Process crypto operation */
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
 
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
 
-	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	ut_params->obuf = ut_params->op->sym->m_src;
 
 	/* Validate obuf */
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1522,10 +1556,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
 	ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
 			catch_22_quote, QUOTE_512_BYTES, 0);
 
-	ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-			DIGEST_BYTE_LENGTH_AES_XCBC);
-	TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
 	/* Setup Cipher Parameters */
 	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = &ut_params->auth_xform;
@@ -1546,54 +1576,71 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
 
 	/* Create Crypto session*/
-	ut_params->sess =
-		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
-						&ut_params->cipher_xform);
+	ut_params->sess = rte_cryptodev_sym_session_create(
+			ts_params->valid_devs[0],
+			&ut_params->cipher_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
-	ut_params->op = &ut_params->ol->op.crypto;
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
 
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
 
-	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
 
-	ut_params->op->iv.data = (uint8_t *)
-		rte_pktmbuf_prepend(ut_params->ibuf,
-				CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+	/* Set operation cipher parameters */
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+			sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
 			CIPHER_IV_LENGTH_AES_CBC);
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+	/* Set operation authentication parameters */
+	sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+			sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+			sym_op->m_src,
+			CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+	memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
+
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
 
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
 
 	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op);
+	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
 
 	/* Validate obuf */
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
-			rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
-			CIPHER_IV_LENGTH_AES_CBC,
+			rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+					uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
 			catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
 			QUOTE_512_BYTES,
 			"Ciphertext data not as expected");
+
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
-			rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
-			CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+			rte_pktmbuf_mtod_offset(
+					ut_params->op->sym->m_src, uint8_t *,
+					CIPHER_IV_LENGTH_AES_CBC +
+					QUOTE_512_BYTES),
 			catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
 			DIGEST_BYTE_LENGTH_AES_XCBC,
 			"Generated digest data not as expected");
@@ -1612,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
 		(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
 		QUOTE_512_BYTES, 0);
 
-	ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-			DIGEST_BYTE_LENGTH_AES_XCBC);
-	TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
-	rte_memcpy(ut_params->digest,
-			catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
-			DIGEST_BYTE_LENGTH_AES_XCBC);
-
 	/* Setup Cipher Parameters */
 	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = NULL;
@@ -1646,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
-	ut_params->op = &ut_params->ol->op.crypto;
+	/* Set crypto operation data parameters */
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
 
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
 
-	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
 
-	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
-			CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+				ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+			ut_params->ibuf, QUOTE_512_BYTES);
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
+
+	rte_memcpy(sym_op->auth.digest.data,
+			catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
+			DIGEST_BYTE_LENGTH_AES_XCBC);
+
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
+
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+			ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
 			CIPHER_IV_LENGTH_AES_CBC);
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
 
 	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	ut_params->obuf = ut_params->op->sym->m_src;
 
 	/* Validate obuf */
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1835,50 +1894,53 @@ test_not_in_place_crypto(void)
 			DIGEST_BYTE_LENGTH_SHA512);
 
 	/* Generate Crypto op data structure */
-	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-	TEST_ASSERT_NOT_NULL(ut_params->ol,
-			"Failed to allocate pktmbuf offload");
-
-	ut_params->op = &ut_params->ol->op.crypto;
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+	/* set crypto operation source mbuf */
+	sym_op->m_src = ut_params->ibuf;
+	sym_op->m_dst = dst_m;
 
-	ut_params->op->digest.data = ut_params->digest;
-	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+	sym_op->auth.digest.data = ut_params->digest;
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, QUOTE_512_BYTES);
-	ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+	sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
 
-	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+	sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->auth.data.length = QUOTE_512_BYTES;
+
+
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
 			ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
-	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
 			ut_params->ibuf, 0);
-	ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-	rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+	rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
 			CIPHER_IV_LENGTH_AES_CBC);
 
-	ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
-	ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-	ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
-	ut_params->op->dst.m = dst_m;
-	ut_params->op->dst.offset = 0;
-
-	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+	sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+	sym_op->cipher.data.length = QUOTE_512_BYTES;
 
 	/* Process crypto operation */
-	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
-			ut_params->ibuf);
-	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op);
+	TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto operation processing failed");
 
 	/* Validate obuf */
 	TEST_ASSERT_BUFFERS_ARE_EQUAL(
-			rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+			rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
 			catch_22_quote,
 			QUOTE_512_BYTES,
 			"Plaintext data not as expected");
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index b0c8abf..b43f9aa 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -50,7 +50,7 @@
 
 struct crypto_testsuite_params {
 	struct rte_mempool *mbuf_mp;
-	struct rte_mempool *mbuf_ol_pool;
+	struct rte_mempool *op_mpool;
 
 	uint16_t nb_queue_pairs;
 
@@ -68,8 +68,7 @@ struct crypto_unittest_params {
 
 	struct rte_cryptodev_sym_session *sess;
 
-	struct rte_crypto_sym_op *op;
-	struct rte_mbuf_offload *ol;
+	struct rte_crypto_op *op;
 
 	struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
 	struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +78,7 @@ struct crypto_unittest_params {
 
 static struct rte_mbuf *
 setup_test_string(struct rte_mempool *mpool,
-		const char *string, size_t len, uint8_t blocksize)
+		const uint8_t *data, size_t len, uint8_t blocksize)
 {
 	struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
 	size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +91,7 @@ setup_test_string(struct rte_mempool *mpool,
 			return NULL;
 		}
 
-		rte_memcpy(dst, string, t_len);
+		rte_memcpy(dst, (const void *)data, t_len);
 	}
 	return m;
 }
@@ -113,23 +112,24 @@ testsuite_setup(void)
 	ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
 	if (ts_params->mbuf_mp == NULL) {
 		/* Not already created so create */
-		ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
-			MBUF_SIZE, MBUF_CACHE_SIZE,
-			sizeof(struct rte_pktmbuf_pool_private),
-			rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
-			rte_socket_id(), 0);
+		ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+				"CRYPTO_PERF_MBUFPOOL",
+				NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+				rte_socket_id());
 		if (ts_params->mbuf_mp == NULL) {
 			RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
 			return TEST_FAILED;
 		}
 	}
 
-	ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
-				NUM_MBUFS, MBUF_CACHE_SIZE,
-				DEFAULT_NUM_XFORMS *
-				sizeof(struct rte_crypto_sym_xform),
-				rte_socket_id());
-		if (ts_params->mbuf_ol_pool == NULL) {
+
+	ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			NUM_MBUFS, MBUF_CACHE_SIZE,
+			DEFAULT_NUM_XFORMS *
+			sizeof(struct rte_crypto_sym_xform),
+			rte_socket_id());
+		if (ts_params->op_mpool == NULL) {
 			RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
 			return TEST_FAILED;
 		}
@@ -256,8 +256,8 @@ ut_teardown(void)
 				ut_params->sess);
 
 	/* free crypto operation structure */
-	if (ut_params->ol)
-		rte_pktmbuf_offload_free(ut_params->ol);
+	if (ut_params->op)
+		rte_crypto_op_free(ut_params->op);
 
 	for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
 		if (ut_params->obuf[i])
@@ -1698,11 +1698,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
 static int
 test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 {
-	uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
-	struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+	uint32_t num_to_submit = 4096;
+	struct rte_crypto_op *c_ops[num_to_submit];
+	struct rte_crypto_op *proc_ops[num_to_submit];
 	uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
 	uint32_t burst_sent, burst_received;
-	uint32_t b, burst_size, num_sent, num_received;
+	uint32_t i, burst_size, num_sent, num_received;
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
 	struct crypto_unittest_params *ut_params = &unittest_params;
 	struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1739,46 +1740,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure(s) */
-	for (b = 0; b < num_to_submit ; b++) {
-		tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
-			(const char *)data_params[0].expected.ciphertext,
+	for (i = 0; i < num_to_submit ; i++) {
+		struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+				data_params[0].expected.ciphertext,
 				data_params[0].length, 0);
-		TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+		TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
 
-		ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+		ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
 				DIGEST_BYTE_LENGTH_SHA256);
-		TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+		TEST_ASSERT_NOT_NULL(ut_params->digest,
+				"no room to append digest");
 
 		rte_memcpy(ut_params->digest, data_params[0].expected.digest,
 			DIGEST_BYTE_LENGTH_SHA256);
 
-		struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
-			ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
-		TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
 
-		struct rte_crypto_sym_op *cop = &ol->op.crypto;
+		struct rte_crypto_op *op =
+				rte_crypto_op_alloc(ts_params->op_mpool,
+						RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 
-		rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+		rte_crypto_op_attach_sym_session(op, ut_params->sess);
 
-		cop->digest.data = ut_params->digest;
-		cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+		op->sym->auth.digest.data = ut_params->digest;
+		op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
 				data_params[0].length);
-		cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+		op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+		op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+		op->sym->auth.data.length = data_params[0].length;
+
 
-		cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+		op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
 				CIPHER_IV_LENGTH_AES_CBC);
-		cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
-		cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+		op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+		op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-		rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+		rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+				CIPHER_IV_LENGTH_AES_CBC);
 
-		cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-		cop->data.to_cipher.length = data_params[0].length;
+		op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+		op->sym->cipher.data.length = data_params[0].length;
 
-		cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-		cop->data.to_hash.length = data_params[0].length;
+		op->sym->m_src = m;
 
-		rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+		c_ops[i] = op;
 	}
 
 	printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1789,17 +1794,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 	printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
 	printf("\tRetries (Device Busy)\tAverage IA cycle cost "
 			"(assuming 0 retries)");
-	for (b = 2; b <= 128 ; b *= 2) {
+	for (i = 2; i <= 128 ; i *= 2) {
 		num_sent = 0;
 		num_received = 0;
 		retries = 0;
 		failed_polls = 0;
-		burst_size = b;
+		burst_size = i;
 		total_cycles = 0;
 		while (num_sent < num_to_submit) {
 			start_cycles = rte_rdtsc_precise();
-			burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
-					&tx_mbufs[num_sent],
+			burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+					0, &c_ops[num_sent],
 					((num_to_submit-num_sent) < burst_size) ?
 					num_to_submit-num_sent : burst_size);
 			if (burst_sent == 0)
@@ -1814,9 +1819,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 			rte_delay_ms(1);
 
 			start_cycles = rte_rdtsc_precise();
-			burst_received =
-				rte_cryptodev_dequeue_burst(dev_num,
-						0, rx_mbufs, burst_size);
+			burst_received = rte_cryptodev_dequeue_burst(
+					dev_num, 0, proc_ops, burst_size);
 			if (burst_received == 0)
 				failed_polls++;
 			else
@@ -1831,9 +1835,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 				rte_cryptodev_enqueue_burst(dev_num, 0,
 						NULL, 0);
 
-			burst_received =
-				rte_cryptodev_dequeue_burst(dev_num,
-						0, rx_mbufs, burst_size);
+			burst_received = rte_cryptodev_dequeue_burst(
+					dev_num, 0, proc_ops, burst_size);
 			if (burst_received == 0)
 				failed_polls++;
 			else
@@ -1847,16 +1850,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 	}
 	printf("\n");
 
-	for (b = 0; b < max_outstanding_reqs ; b++) {
-		struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
-		if (ol) {
-			do {
-				rte_pktmbuf_offload_free(ol);
-				ol = ol->next;
-			} while (ol != NULL);
-		}
-		rte_pktmbuf_free(tx_mbufs[b]);
+	for (i = 0; i < num_to_submit ; i++) {
+		rte_pktmbuf_free(c_ops[i]->sym->m_src);
+		rte_crypto_op_free(c_ops[i]);
 	}
 	return TEST_SUCCESS;
 }
@@ -1870,7 +1866,10 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 	uint64_t failed_polls, retries, start_cycles, end_cycles;
 	const uint64_t mhz = rte_get_tsc_hz()/1000000;
 	double throughput, mmps;
-	struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+
+	struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+	struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
 	struct crypto_unittest_params *ut_params = &unittest_params;
 	struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1919,63 +1918,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 
 		/* Generate Crypto op data structure(s) */
 		for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
-			tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+			struct rte_mbuf *m = setup_test_string(
+					ts_params->mbuf_mp,
+					(const uint8_t *)
 					data_params[index].plaintext,
 					data_params[index].length,
 					0);
 
-			ut_params->digest = (uint8_t *)rte_pktmbuf_append(
-				tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
-			TEST_ASSERT_NOT_NULL(ut_params->digest,	"no room to append digest");
+			ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+					DIGEST_BYTE_LENGTH_SHA256);
+			TEST_ASSERT_NOT_NULL(ut_params->digest
+					, "no room to append digest");
 
-			rte_memcpy(ut_params->digest, data_params[index].expected.digest,
-			DIGEST_BYTE_LENGTH_SHA256);
+			rte_memcpy(ut_params->digest,
+					data_params[index].expected.digest,
+					DIGEST_BYTE_LENGTH_SHA256);
+
+			struct rte_crypto_op *op = rte_crypto_op_alloc(
+					ts_params->op_mpool,
+					RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 
-			struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
-						ts_params->mbuf_ol_pool,
-						RTE_PKTMBUF_OL_CRYPTO_SYM);
-			TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+			rte_crypto_op_attach_sym_session(op, ut_params->sess);
 
-			struct rte_crypto_sym_op *cop = &ol->op.crypto;
+			op->sym->auth.digest.data = ut_params->digest;
+			op->sym->auth.digest.phys_addr =
+					rte_pktmbuf_mtophys_offset(m,
+						data_params[index].length);
+			op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
 
-			rte_crypto_sym_op_attach_session(cop, ut_params->sess);
+			op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+			op->sym->auth.data.length = data_params[index].length;
 
-			cop->digest.data = ut_params->digest;
-			cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
-				tx_mbufs[b], data_params[index].length);
-			cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+			op->sym->cipher.iv.data = (uint8_t *)
+					rte_pktmbuf_prepend(m,
+						CIPHER_IV_LENGTH_AES_CBC);
+			op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+			op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-			cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+			rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
 					CIPHER_IV_LENGTH_AES_CBC);
-			cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
-			cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
 
-			rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+			op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+			op->sym->cipher.data.length = data_params[index].length;
 
-			cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
-			cop->data.to_cipher.length = data_params[index].length;
 
-			cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
-			cop->data.to_hash.length = data_params[index].length;
+			op->sym->m_src = m;
 
-			rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+			c_ops[b] = op;
 		}
 		start_cycles = rte_rdtsc_precise();
 		while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
-			burst_sent = rte_cryptodev_enqueue_burst(dev_num,
-					0, tx_mbufs,
-					((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
-							< DEFAULT_BURST_SIZE) ?
-					DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
-							DEFAULT_BURST_SIZE);
+			uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+					num_sent) < DEFAULT_BURST_SIZE ?
+						DEFAULT_NUM_REQS_TO_SUBMIT -
+						num_sent : DEFAULT_BURST_SIZE;
+
+			burst_sent = rte_cryptodev_enqueue_burst(
+					dev_num, 0, c_ops, burst_size);
 			if (burst_sent == 0)
 				retries++;
 			else
 				num_sent += burst_sent;
 
-			burst_received =
-				rte_cryptodev_dequeue_burst(dev_num,
-					0, rx_mbufs, DEFAULT_BURST_SIZE);
+			burst_received = rte_cryptodev_dequeue_burst(dev_num,
+					0, proc_ops, DEFAULT_BURST_SIZE);
 			if (burst_received == 0)
 				failed_polls++;
 			else
@@ -1987,9 +1993,9 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 				rte_cryptodev_enqueue_burst(dev_num, 0,
 						NULL, 0);
 
-			burst_received =
-				rte_cryptodev_dequeue_burst(dev_num, 0,
-						rx_mbufs, DEFAULT_BURST_SIZE);
+			burst_received = rte_cryptodev_dequeue_burst(
+					dev_num, 0, proc_ops,
+					DEFAULT_BURST_SIZE);
 			if (burst_received == 0)
 				failed_polls++;
 			else
@@ -2006,15 +2012,8 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 		printf("\t%.2f\t%.2f", mmps, throughput);
 		printf("\t\t%"PRIu64, retries);
 		for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
-			struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
-			if (ol) {
-				do {
-					rte_pktmbuf_offload_free(ol);
-					ol = ol->next;
-				} while (ol != NULL);
-			}
-			rte_pktmbuf_free(tx_mbufs[b]);
+			rte_pktmbuf_free(c_ops[b]->sym->m_src);
+			rte_crypto_op_free(c_ops[b]);
 		}
 	}
 
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f39ebd5..f28b29f 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -296,16 +296,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
 
 /** Get multi buffer session */
 static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
 {
 	struct aesni_mb_session *sess = NULL;
 
-	if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
-		if (unlikely(crypto_op->session->type !=
+	if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+		if (unlikely(op->sym->session->type !=
 				RTE_CRYPTODEV_AESNI_MB_PMD))
 			return NULL;
 
-		sess = (struct aesni_mb_session *)crypto_op->session->_private;
+		sess = (struct aesni_mb_session *)op->sym->session->_private;
 	} else  {
 		void *_sess = NULL;
 
@@ -316,7 +316,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
 			((struct rte_cryptodev_sym_session *)_sess)->_private;
 
 		if (unlikely(aesni_mb_set_session_parameters(qp->ops,
-				sess, crypto_op->xform) != 0)) {
+				sess, op->sym->xform) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
 			sess = NULL;
 		}
@@ -338,12 +338,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
  * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
  */
 static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
-		struct rte_crypto_sym_op *c_op,
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
 		struct aesni_mb_session *session)
 {
 	JOB_AES_HMAC *job;
 
+	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+	uint16_t m_offset = 0;
+
 	job = (*qp->ops->job.get_next)(&qp->mb_mgr);
 	if (unlikely(job == NULL))
 		return job;
@@ -372,10 +374,26 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
 	}
 
 	/* Mutable crypto operation parameters */
+	if (op->sym->m_dst) {
+		m_src = m_dst = op->sym->m_dst;
+
+		/* append space for output data to mbuf */
+		char *odata = rte_pktmbuf_append(m_dst,
+				rte_pktmbuf_data_len(op->sym->m_src));
+		if (odata == NULL)
+			MB_LOG_ERR("failed to allocate space in destination "
+					"mbuf for source data");
+
+		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+				rte_pktmbuf_data_len(op->sym->m_src));
+	} else {
+		m_dst = m_src;
+		m_offset = op->sym->cipher.data.offset;
+	}
 
 	/* Set digest output location */
 	if (job->cipher_direction == DECRYPT) {
-		job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+		job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
 				get_digest_byte_length(job->hash_alg));
 
 		if (job->auth_tag_output == NULL) {
@@ -388,7 +406,7 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
 				sizeof(get_digest_byte_length(job->hash_alg)));
 
 	} else {
-		job->auth_tag_output = c_op->digest.data;
+		job->auth_tag_output = op->sym->auth.digest.data;
 	}
 
 	/*
@@ -399,26 +417,22 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
 			get_truncated_digest_byte_length(job->hash_alg);
 
 	/* Set IV parameters */
-	job->iv = c_op->iv.data;
-	job->iv_len_in_bytes = c_op->iv.length;
+	job->iv = op->sym->cipher.iv.data;
+	job->iv_len_in_bytes = op->sym->cipher.iv.length;
 
 	/* Data  Parameter */
-	job->src = rte_pktmbuf_mtod(m, uint8_t *);
-	job->dst = c_op->dst.m ?
-			rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
-			c_op->dst.offset :
-			rte_pktmbuf_mtod(m, uint8_t *) +
-			c_op->data.to_cipher.offset;
+	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
 
-	job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
-	job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+	job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+	job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
 
-	job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
-	job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+	job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+	job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
 
 	/* Set user data to be crypto operation data struct */
-	job->user_data = m;
-	job->user_data2 = c_op;
+	job->user_data = op;
+	job->user_data2 = m_dst;
 
 	return job;
 }
@@ -433,43 +447,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
  * verification of supplied digest in the case of a HASH_CIPHER operation
  * - Returns NULL on invalid job
  */
-static struct rte_mbuf *
+static struct rte_crypto_op *
 post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 {
-	struct rte_mbuf *m;
-	struct rte_crypto_sym_op *c_op;
+	struct rte_crypto_op *op =
+			(struct rte_crypto_op *)job->user_data;
+	struct rte_mbuf *m_dst =
+			(struct rte_mbuf *)job->user_data2;
 
-	if (job->user_data == NULL)
+	if (op == NULL || m_dst == NULL)
 		return NULL;
 
-	/* handled retrieved job */
-	m = (struct rte_mbuf *)job->user_data;
-	c_op = (struct rte_crypto_sym_op *)job->user_data2;
-
 	/* set status as successful by default */
-	c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
 	/* check if job has been processed  */
 	if (unlikely(job->status != STS_COMPLETED)) {
-		c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		return m;
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return op;
 	} else if (job->chain_order == HASH_CIPHER) {
 		/* Verify digest if required */
-		if (memcmp(job->auth_tag_output, c_op->digest.data,
+		if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
 				job->auth_tag_output_len_in_bytes) != 0)
-			c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 
 		/* trim area used for digest from mbuf */
-		rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+		rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
 	}
 
 	/* Free session if a session-less crypto op */
-	if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
-		rte_mempool_put(qp->sess_mp, c_op->session);
-		c_op->session = NULL;
+	if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
 	}
 
-	return m;
+	return op;
 }
 
 /**
@@ -485,17 +497,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 static unsigned
 handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 {
-	struct rte_mbuf *m = NULL;
+	struct rte_crypto_op *op = NULL;
 	unsigned processed_jobs = 0;
 
 	while (job) {
 		processed_jobs++;
-		m = post_process_mb_job(qp, job);
-		if (m)
-			rte_ring_enqueue(qp->processed_pkts, (void *)m);
+		op = post_process_mb_job(qp, job);
+		if (op)
+			rte_ring_enqueue(qp->processed_ops, (void *)op);
 		else
 			qp->stats.dequeue_err_count++;
-
 		job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
 	}
 
@@ -503,11 +514,9 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 }
 
 static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
-		uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
-	struct rte_mbuf_offload *ol;
-
 	struct aesni_mb_session *sess;
 	struct aesni_mb_qp *qp = queue_pair;
 
@@ -515,21 +524,23 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
 
 	int i, processed_jobs = 0;
 
-	for (i = 0; i < nb_bufs; i++) {
-		ol = rte_pktmbuf_offload_get(bufs[i],
-				RTE_PKTMBUF_OL_CRYPTO_SYM);
-		if (unlikely(ol == NULL)) {
+	for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+		if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+			MB_LOG_ERR("PMD only supports symmetric crypto "
+				"operation requests, op (%p) is not a "
+				"symmetric operation.", ops[i]);
 			qp->stats.enqueue_err_count++;
 			goto flush_jobs;
 		}
-
-		sess = get_session(qp, &ol->op.crypto);
+#endif
+		sess = get_session(qp, ops[i]);
 		if (unlikely(sess == NULL)) {
 			qp->stats.enqueue_err_count++;
 			goto flush_jobs;
 		}
 
-		job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+		job = process_crypto_op(qp, ops[i], sess);
 		if (unlikely(job == NULL)) {
 			qp->stats.enqueue_err_count++;
 			goto flush_jobs;
@@ -565,15 +576,15 @@ flush_jobs:
 }
 
 static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
-		struct rte_mbuf **bufs,	uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
 	struct aesni_mb_qp *qp = queue_pair;
 
 	unsigned nb_dequeued;
 
-	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
-			(void **)bufs, nb_bufs);
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+			(void **)ops, nb_ops);
 	qp->stats.dequeued_count += nb_dequeued;
 
 	return nb_dequeued;
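
The only link between a completed multi-buffer job and its originating
request is the pair of user data pointers set in process_crypto_op();
condensed, the round trip implemented above is:

	/* submission: stash the op and its output mbuf on the job */
	job->user_data = op;
	job->user_data2 = m_dst;

	/* completion: recover both without a host-side lookup table */
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;

post_process_mb_job() then sets op->status and places the op on the
qp->processed_ops ring, from which aesni_mb_pmd_dequeue_burst() drains
it.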
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index d56de12..b1dd103 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -138,9 +138,9 @@ aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
 	return 0;
 }
 
-/** Create a ring to place process packets on */
+/** Create a ring to place processed operations on */
 static struct rte_ring *
-aesni_mb_pmd_qp_create_processed_pkts_ring(struct aesni_mb_qp *qp,
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
 		unsigned ring_size, int socket_id)
 {
 	struct rte_ring *r;
@@ -148,12 +148,12 @@ aesni_mb_pmd_qp_create_processed_pkts_ring(struct aesni_mb_qp *qp,
 	r = rte_ring_lookup(qp->name);
 	if (r) {
 		if (r->prod.size >= ring_size) {
-			MB_LOG_INFO("Reusing existing ring %s for processed packets",
+			MB_LOG_INFO("Reusing existing ring %s for processed ops",
 					 qp->name);
 			return r;
 		}
 
-		MB_LOG_ERR("Unable to reuse existing ring %s for processed packets",
+		MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
 				 qp->name);
 		return NULL;
 	}
@@ -189,9 +189,9 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp->ops = &job_ops[internals->vector_mode];
 
-	qp->processed_pkts = aesni_mb_pmd_qp_create_processed_pkts_ring(qp,
+	qp->processed_ops = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
 			qp_conf->nb_descriptors, socket_id);
-	if (qp->processed_pkts == NULL)
+	if (qp->processed_ops == NULL)
 		goto qp_setup_cleanup;
 
 	qp->sess_mp = dev->data->session_pool;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 0aed177..949d9a6 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -146,8 +146,8 @@ struct aesni_mb_qp {
 	/**< Vector mode dependent pointer table of the multi-buffer APIs */
 	MB_MGR mb_mgr;
 	/**< Multi-buffer instance */
-	struct rte_ring *processed_pkts;
-	/**< Ring for placing process packets */
+	struct rte_ring *processed_ops;
+	/**< Ring for placing processed operations */
 	struct rte_mempool *sess_mp;
 	/**< Session Mempool */
 	struct rte_cryptodev_stats stats;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e7b9027..11f7fb2 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -72,7 +72,7 @@ static inline uint32_t
 adf_modulo(uint32_t data, uint32_t shift);
 
 static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
 
 void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
 		void *session)
@@ -275,15 +275,16 @@ unsigned qat_crypto_sym_get_session_private_size(
 }
 
 
-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts)
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
-	register uint32_t nb_pkts_sent = 0;
-	register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+	register uint32_t nb_ops_sent = 0;
+	register struct rte_crypto_op **cur_op = ops;
 	register int ret;
-	uint16_t nb_pkts_possible = nb_pkts;
+	uint16_t nb_ops_possible = nb_ops;
 	register uint8_t *base_addr;
 	register uint32_t tail;
 	int overflow;
@@ -294,47 +295,44 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
 	tail = queue->tail;
 
 	/* Find how many can actually fit on the ring */
-	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
 				- queue->max_inflights;
 	if (overflow > 0) {
 		rte_atomic16_sub(&tmp_qp->inflights16, overflow);
-		nb_pkts_possible = nb_pkts - overflow;
-		if (nb_pkts_possible == 0)
+		nb_ops_possible = nb_ops - overflow;
+		if (nb_ops_possible == 0)
 			return 0;
 	}
 
-	while (nb_pkts_sent != nb_pkts_possible) {
-
-		ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
-			base_addr + tail);
+	while (nb_ops_sent != nb_ops_possible) {
+		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
-			if (nb_pkts_sent == 0)
+			if (nb_ops_sent == 0)
 				return 0;
 			goto kick_tail;
 		}
 
 		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
-		nb_pkts_sent++;
-		cur_tx_pkt++;
+		nb_ops_sent++;
+		cur_op++;
 	}
 kick_tail:
 	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
 			queue->hw_queue_number, tail);
 	queue->tail = tail;
-	tmp_qp->stats.enqueued_count += nb_pkts_sent;
-	return nb_pkts_sent;
+	tmp_qp->stats.enqueued_count += nb_ops_sent;
+	return nb_ops_sent;
 }
 
 uint16_t
-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
-				uint16_t nb_pkts)
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
-	struct rte_mbuf_offload *ol;
 	struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 	uint32_t msg_counter = 0;
-	struct rte_mbuf *rx_mbuf;
+	struct rte_crypto_op *rx_op;
 	struct icp_qat_fw_comn_resp *resp_msg;
 
 	queue = &(tmp_qp->rx_q);
@@ -342,17 +340,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
 			((uint8_t *)queue->base_addr + queue->head);
 
 	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
-			msg_counter != nb_pkts) {
-		rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
-		ol = rte_pktmbuf_offload_get(rx_mbuf,
-					RTE_PKTMBUF_OL_CRYPTO_SYM);
+			msg_counter != nb_ops) {
+		rx_op = (struct rte_crypto_op *)(uintptr_t)
+				(resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+				sizeof(struct icp_qat_fw_comn_resp));
+#endif
 		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
 				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
 					resp_msg->comn_hdr.comn_status)) {
-			ol->op.crypto.status =
-					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 		} else {
-			ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		}
 		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
 		queue->head = adf_modulo(queue->head +
@@ -361,9 +362,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
 		resp_msg = (struct icp_qat_fw_comn_resp *)
 					((uint8_t *)queue->base_addr +
 							queue->head);
-
-		*rx_pkts = rx_mbuf;
-		rx_pkts++;
+		*ops = rx_op;
+		ops++;
 		msg_counter++;
 	}
 	if (msg_counter > 0) {
@@ -377,38 +377,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
 }
 
 static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 {
-	struct rte_mbuf_offload *ol;
-
 	struct qat_session *ctx;
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	register struct icp_qat_fw_la_bulk_req *qat_req;
 
-	ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
-	if (unlikely(ol == NULL)) {
-		PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
-				"to (%p) mbuf.", mbuf);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+				"operation requests, op (%p) is not a "
+				"symmetric operation.", op);
 		return -EINVAL;
 	}
-
-	if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+#endif
+	if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
 		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests mbuf (%p) is sessionless.", mbuf);
+				" requests, op (%p) is sessionless.", op);
 		return -EINVAL;
 	}
 
-	if (unlikely(ol->op.crypto.session->type
-					!= RTE_CRYPTODEV_QAT_SYM_PMD)) {
+	if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
 		PMD_DRV_LOG(ERR, "Session was not created for this device");
 		return -EINVAL;
 	}
 
-	ctx = (struct qat_session *)ol->op.crypto.session->_private;
+	ctx = (struct qat_session *)op->sym->session->_private;
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	*qat_req = ctx->fw_req;
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
 
 	/*
 	 * The following code assumes:
@@ -416,37 +414,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
 	 * - always in place.
 	 */
 	qat_req->comn_mid.dst_length =
-			qat_req->comn_mid.src_length = mbuf->data_len;
+			qat_req->comn_mid.src_length =
+					rte_pktmbuf_data_len(op->sym->m_src);
 	qat_req->comn_mid.dest_data_addr =
 			qat_req->comn_mid.src_data_addr =
-					rte_pktmbuf_mtophys(mbuf);
-
+					rte_pktmbuf_mtophys(op->sym->m_src);
 	cipher_param = (void *)&qat_req->serv_specif_rqpars;
 	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
 
-	cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
-	cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
-	if (ol->op.crypto.iv.length &&
-		(ol->op.crypto.iv.length <=
-				sizeof(cipher_param->u.cipher_IV_array))) {
+	cipher_param->cipher_length = op->sym->cipher.data.length;
+	cipher_param->cipher_offset = op->sym->cipher.data.offset;
+	if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+			sizeof(cipher_param->u.cipher_IV_array))) {
 		rte_memcpy(cipher_param->u.cipher_IV_array,
-				ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+				op->sym->cipher.iv.data,
+				op->sym->cipher.iv.length);
 	} else {
 		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
 				qat_req->comn_hdr.serv_specif_flags,
 				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+		cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
 	}
-	if (ol->op.crypto.digest.phys_addr) {
+	if (op->sym->auth.digest.phys_addr) {
 		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
 				qat_req->comn_hdr.serv_specif_flags,
 				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
-		auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+		auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
 	}
-	auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
-	auth_param->auth_len = ol->op.crypto.data.to_hash.length;
-	auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+	auth_param->auth_off = op->sym->auth.data.offset;
+	auth_param->auth_len = op->sym->auth.data.length;
 
+	auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
 	/* (GCM) aad length(240 max) will be at this location after precompute */
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -457,9 +455,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
 	}
 	auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
 
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
 	rte_hexdump(stdout, "qat_req:", qat_req,
 			sizeof(struct icp_qat_fw_la_bulk_req));
+	rte_hexdump(stdout, "src_data:",
+			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+			rte_pktmbuf_data_len(op->sym->m_src));
+	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+			op->sym->cipher.iv.length);
+	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+			op->sym->auth.digest.length);
+	rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+			op->sym->auth.aad.length);
 #endif
 	return 0;
 }
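
The QAT path relies on the firmware echoing the request's opaque_data
back in the response, which is what lets the dequeue side recover the
op without tracking in-flight requests. Condensed, the round trip
above is:

	/* enqueue: store the op pointer in the request descriptor */
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	/* dequeue: read it back from the response descriptor */
	rx_op = (struct rte_crypto_op *)(uintptr_t)resp_msg->opaque_data;

The op's status is then set from the response's common status field
before the pointer is handed back to the caller.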
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index e9f71fe..9323383 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -115,12 +115,12 @@ extern void
 qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
 
 
-uint16_t
-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
 
-uint16_t
-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
 
 #endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 85700fc..5e51aca 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
 	cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
 	cryptodev->dev_ops = &crypto_qat_ops;
 
-	cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
-	cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
+	cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
 
 
 	internals = cryptodev->data->dev_private;
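
With the hooks assigned above, applications reach the QAT burst
functions through the generic inline wrappers; a condensed sketch of
the dispatch performed by rte_cryptodev_enqueue_burst() in
rte_cryptodev.h:

	static inline uint16_t
	rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
			struct rte_crypto_op **ops, uint16_t nb_ops)
	{
		struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];

		/* indirect call through the hook set by the PMD at init */
		return (*dev->enqueue_burst)(
				dev->data->queue_pairs[qp_id], ops, nb_ops);
	}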
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 9b6b7ef..6aaa7c0 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -104,6 +104,11 @@ struct pkt_buffer {
 	struct rte_mbuf *buffer[MAX_PKT_BURST];
 };
 
+struct op_buffer {
+	unsigned len;
+	struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
 #define MAX_RX_QUEUE_PER_LCORE 16
 #define MAX_TX_QUEUE_PER_PORT 16
 
@@ -159,8 +164,8 @@ struct lcore_queue_conf {
 	unsigned nb_crypto_devs;
 	unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
 
-	struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
-	struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+	struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+	struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
 } __rte_cache_aligned;
 
 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
@@ -180,7 +185,7 @@ static const struct rte_eth_conf port_conf = {
 };
 
 struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
 
 /* Per-port statistics struct */
 struct l2fwd_port_statistics {
@@ -294,20 +299,21 @@ static int
 l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
 		struct l2fwd_crypto_params *cparams)
 {
-	struct rte_mbuf **pkt_buffer;
+	struct rte_crypto_op **op_buffer;
 	unsigned ret;
 
-	pkt_buffer = (struct rte_mbuf **)
-			qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+	op_buffer = (struct rte_crypto_op **)
+			qconf->op_buf[cparams->dev_id].buffer;
+
+	ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+			cparams->qp_id, op_buffer, (uint16_t) n);
 
-	ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
-			pkt_buffer, (uint16_t) n);
 	crypto_statistics[cparams->dev_id].enqueued += ret;
 	if (unlikely(ret < n)) {
 		crypto_statistics[cparams->dev_id].errors += (n - ret);
 		do {
-			rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
-			rte_pktmbuf_free(pkt_buffer[ret]);
+			rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+			rte_crypto_op_free(op_buffer[ret]);
 		} while (++ret < n);
 	}
 
@@ -315,7 +321,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
 }
 
 static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+		struct l2fwd_crypto_params *cparams)
 {
 	unsigned lcore_id, len;
 	struct lcore_queue_conf *qconf;
@@ -323,23 +330,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
 	lcore_id = rte_lcore_id();
 
 	qconf = &lcore_queue_conf[lcore_id];
-	len = qconf->crypto_pkt_buf[cparams->dev_id].len;
-	qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+	len = qconf->op_buf[cparams->dev_id].len;
+	qconf->op_buf[cparams->dev_id].buffer[len] = op;
 	len++;
 
-	/* enough pkts to be sent */
+	/* enough ops to be sent */
 	if (len == MAX_PKT_BURST) {
 		l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
 		len = 0;
 	}
 
-	qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+	qconf->op_buf[cparams->dev_id].len = len;
 	return 0;
 }
 
 static int
 l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
-		struct rte_mbuf_offload *ol,
+		struct rte_crypto_op *op,
 		struct l2fwd_crypto_params *cparams)
 {
 	struct ether_hdr *eth_hdr;
@@ -377,43 +384,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 	}
 
 	/* Set crypto operation data parameters */
-	rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
+	rte_crypto_op_attach_sym_session(op, cparams->session);
 
 	/* Append space for digest to end of packet */
-	ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+	op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
 			cparams->digest_length);
-	ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+	op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
 			rte_pktmbuf_pkt_len(m) - cparams->digest_length);
-	ol->op.crypto.digest.length = cparams->digest_length;
+	op->sym->auth.digest.length = cparams->digest_length;
+
+	op->sym->auth.data.offset = ipdata_offset;
+	op->sym->auth.data.length = data_len;
 
-	ol->op.crypto.iv.data = cparams->iv_key.data;
-	ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
-	ol->op.crypto.iv.length = cparams->iv_key.length;
 
-	ol->op.crypto.data.to_cipher.offset = ipdata_offset;
-	ol->op.crypto.data.to_cipher.length = data_len;
+	op->sym->cipher.iv.data = cparams->iv_key.data;
+	op->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+	op->sym->cipher.iv.length = cparams->iv_key.length;
 
-	ol->op.crypto.data.to_hash.offset = ipdata_offset;
-	ol->op.crypto.data.to_hash.length = data_len;
+	op->sym->cipher.data.offset = ipdata_offset;
+	op->sym->cipher.data.length = data_len;
 
-	rte_pktmbuf_offload_attach(m, ol);
+	op->sym->m_src = m;
 
-	return l2fwd_crypto_enqueue(m, cparams);
+	return l2fwd_crypto_enqueue(op, cparams);
 }
 
 
 /* Send the burst of packets on an output interface */
 static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+		uint8_t port)
 {
 	struct rte_mbuf **pkt_buffer;
 	unsigned ret;
-	unsigned queueid = 0;
 
-	pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+	pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
 
-	ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
-			(uint16_t)n);
+	ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
 	port_statistics[port].tx += ret;
 	if (unlikely(ret < n)) {
 		port_statistics[port].dropped += (n - ret);
@@ -435,8 +442,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
 	lcore_id = rte_lcore_id();
 
 	qconf = &lcore_queue_conf[lcore_id];
-	len = qconf->tx_pkt_buf[port].len;
-	qconf->tx_pkt_buf[port].buffer[len] = m;
+	len = qconf->pkt_buf[port].len;
+	qconf->pkt_buf[port].buffer[len] = m;
 	len++;
 
 	/* enough pkts to be sent */
@@ -445,7 +452,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
 		len = 0;
 	}
 
-	qconf->tx_pkt_buf[port].len = len;
+	qconf->pkt_buf[port].len = len;
 	return 0;
 }
 
@@ -505,6 +512,8 @@ static void
 l2fwd_main_loop(struct l2fwd_crypto_options *options)
 {
 	struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+	struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
 	unsigned lcore_id = rte_lcore_id();
 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
 	unsigned i, j, portid, nb_rx;
@@ -565,12 +574,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 		if (unlikely(diff_tsc > drain_tsc)) {
 
 			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
-				if (qconf->tx_pkt_buf[portid].len == 0)
+				if (qconf->pkt_buf[portid].len == 0)
 					continue;
 				l2fwd_send_burst(&lcore_queue_conf[lcore_id],
-						 qconf->tx_pkt_buf[portid].len,
+						 qconf->pkt_buf[portid].len,
 						 (uint8_t) portid);
-				qconf->tx_pkt_buf[portid].len = 0;
+				qconf->pkt_buf[portid].len = 0;
 			}
 
 			/* if timer is enabled */
@@ -599,8 +608,6 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 		 * Read packet from RX queues
 		 */
 		for (i = 0; i < qconf->nb_rx_ports; i++) {
-			struct rte_mbuf_offload *ol;
-
 			portid = qconf->rx_port_list[i];
 
 			cparams = &port_cparams[i];
@@ -610,44 +617,49 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 
 			port_statistics[portid].rx += nb_rx;
 
-			/* Enqueue packets from Crypto device*/
-			for (j = 0; j < nb_rx; j++) {
-				m = pkts_burst[j];
-				ol = rte_pktmbuf_offload_alloc(
-						l2fwd_mbuf_ol_pool,
-						RTE_PKTMBUF_OL_CRYPTO_SYM);
+			if (nb_rx) {
 				/*
-				 * If we can't allocate a offload, then drop
+				 * If we can't allocate crypto ops, then drop
 				 * the rest of the burst and dequeue and
-				 * process the packets to free offload structs
+				 * process the packets to free the crypto ops
 				 */
-				if (unlikely(ol == NULL)) {
-					for (; j < nb_rx; j++) {
-						rte_pktmbuf_free(pkts_burst[j]);
-						port_statistics[portid].dropped++;
-					}
-					break;
+				if (rte_crypto_op_bulk_alloc(
+						l2fwd_crypto_op_pool,
+						RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+						ops_burst, nb_rx) !=
+								nb_rx) {
+					for (j = 0; j < nb_rx; j++)
+						rte_pktmbuf_free(pkts_burst[j]);
+
+					nb_rx = 0;
 				}
 
-				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
-				rte_prefetch0((void *)ol);
+				/* Enqueue packets to the Crypto device */
+				for (j = 0; j < nb_rx; j++) {
+					m = pkts_burst[j];
 
-				l2fwd_simple_crypto_enqueue(m, ol, cparams);
+					l2fwd_simple_crypto_enqueue(m,
+							ops_burst[j], cparams);
+				}
 			}
 
 			/* Dequeue packets from Crypto device */
-			nb_rx = rte_cryptodev_dequeue_burst(
-					cparams->dev_id, cparams->qp_id,
-					pkts_burst, MAX_PKT_BURST);
-			crypto_statistics[cparams->dev_id].dequeued += nb_rx;
-
-			/* Forward crypto'd packets */
-			for (j = 0; j < nb_rx; j++) {
-				m = pkts_burst[j];
-				rte_pktmbuf_offload_free(m->offload_ops);
-				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
-				l2fwd_simple_forward(m, portid);
-			}
+			do {
+				nb_rx = rte_cryptodev_dequeue_burst(
+						cparams->dev_id, cparams->qp_id,
+						ops_burst, MAX_PKT_BURST);
+
+				crypto_statistics[cparams->dev_id].dequeued +=
+						nb_rx;
+
+				/* Forward crypto'd packets */
+				for (j = 0; j < nb_rx; j++) {
+					m = ops_burst[j]->sym->m_src;
+
+					rte_crypto_op_free(ops_burst[j]);
+					l2fwd_simple_forward(m, portid);
+				}
+			} while (nb_rx == MAX_PKT_BURST);
 		}
 	}
 }
@@ -1384,15 +1396,17 @@ main(int argc, char **argv)
 		rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
 
 	/* create the mbuf pool */
-	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
-		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+			sizeof(struct rte_crypto_op),
+			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 	if (l2fwd_pktmbuf_pool == NULL)
 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 
 	/* create crypto op pool */
-	l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
-			"mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
-	if (l2fwd_mbuf_ol_pool == NULL)
+	l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+			rte_socket_id());
+	if (l2fwd_crypto_op_pool == NULL)
 		rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
 
 	/* Enable Ethernet ports */
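
Putting the l2fwd-crypto changes together, the per-packet lifecycle
under the new API is as follows; a minimal sketch with error handling
elided and names taken from the code above:

	struct rte_crypto_op *op;
	struct rte_mbuf *m;	/* packet from rte_eth_rx_burst() */

	/* allocate a symmetric op from the dedicated op pool */
	op = rte_crypto_op_alloc(l2fwd_crypto_op_pool,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC);

	/* bind it to a pre-created session and to the packet; the
	 * cipher/auth regions, IV and digest are then set as in
	 * l2fwd_simple_crypto_enqueue() above */
	rte_crypto_op_attach_sym_session(op, cparams->session);
	op->sym->m_src = m;

	/* hand it to the device; reclaim packet and op afterwards */
	rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
			&op, 1);
	while (rte_cryptodev_dequeue_burst(cparams->dev_id,
			cparams->qp_id, &op, 1) == 0)
		rte_pause();

	m = op->sym->m_src;
	rte_crypto_op_free(op);
	/* m is then forwarded via l2fwd_simple_forward() */
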
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 620c00b..e0c4fb9 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -44,23 +44,369 @@
 extern "C" {
 #endif
 
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_crypto_sym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+	RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	/**< Undefined operation type */
+	RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+	/**< Symmetric operation */
+};
+
 /** Status of crypto operation */
 enum rte_crypto_op_status {
 	RTE_CRYPTO_OP_STATUS_SUCCESS,
 	/**< Operation completed successfully */
-	RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
-	/**< Operation not yet submitted to a cryptodev */
+	RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+	/**< Operation has not yet been processed by a crypto device */
 	RTE_CRYPTO_OP_STATUS_ENQUEUED,
 	/**< Operation is enqueued on device */
 	RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
 	/**< Authentication verification failed */
+	RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+	/**<
+	 * Symmetric operation failed due to invalid session arguments, or,
+	 * in session-less mode, because private operation material could not
+	 * be allocated.
+	 */
 	RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
 	/**< Operation failed due to invalid arguments in request */
 	RTE_CRYPTO_OP_STATUS_ERROR,
 	/**< Error handling operation */
 };
 
-#include <rte_crypto_sym.h>
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. It is used to represent any operation supported by the
+ * cryptodev API; PMDs should check the type parameter to verify that the
+ * operation is supported by the device. Crypto operations are enqueued on
+ * and dequeued from crypto PMDs using rte_cryptodev_enqueue_burst() /
+ * rte_cryptodev_dequeue_burst().
+ */
+struct rte_crypto_op {
+	enum rte_crypto_op_type type;
+	/**< operation type */
+
+	enum rte_crypto_op_status status;
+	/**<
+	 * operation status - this is reset to
+	 * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+	 * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+	 * is successfully processed by a crypto PMD
+	 */
+
+	struct rte_mempool *mempool;
+	/**< crypto operation mempool which operation is allocated from */
+
+	phys_addr_t phys_addr;
+	/**< physical address of crypto operation */
+
+	void *opaque_data;
+	/**< Opaque pointer for user data */
+
+	union {
+		struct rte_crypto_sym_op *sym;
+		/**< Symmetric operation parameters */
+	}; /**< operation specific parameters */
+} __rte_cache_aligned;
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param	op	The crypto operation to be reset.
+ * @param	type	The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+	op->type = type;
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	switch (type) {
+	case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+		/* Symmetric operation structure starts after the end of the
+		 * rte_crypto_op structure.
+		 */
+		op->sym = (struct rte_crypto_sym_op *)(op + 1);
+
+		__rte_crypto_sym_op_reset(op->sym);
+		break;
+	default:
+		break;
+	}
+
+	op->opaque_data = NULL;
+}
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+	enum rte_crypto_op_type type;
+	/**< Type of crypto operations the pool holds. */
+	uint16_t priv_size;
+	/**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param	mempool	rte_crypto_op mempool
+ *
+ * @return	private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+	struct rte_crypto_op_pool_private *priv =
+			rte_mempool_get_priv(mempool);
+
+	return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param	name		pool name
+ * @param	type		crypto operation type, use
+ *				RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ *				supports all operation types
+ * @param	nb_elts		number of elements in pool
+ * @param	cache_size	Number of elements to cache on lcore, see
+ *				*rte_mempool_create* for further details about
+ *				cache size
+ * @param	priv_size	Size of private data to allocate with each
+ *				operation
+ * @param	socket_id	Socket to allocate memory on
+ *
+ * @return
+ *  - On success pointer to mempool
+ *  - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+		int socket_id);
+
+/**
+ * Bulk allocate raw element from mempool and return as crypto operations
+ *
+ * @param	mempool		crypto operation mempool.
+ * @param	type		crypto operation type.
+ * @param	ops		Array to place allocated crypto operations
+ * @param	nb_ops		Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns the number of ops allocated (nb_ops)
+ * - Returns 0 if the mempool cannot supply nb_ops elements
+ * - Returns -EINVAL if the requested type does not match the pool's type
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+		enum rte_crypto_op_type type,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_crypto_op_pool_private *priv;
+
+	priv = rte_mempool_get_priv(mempool);
+	if (unlikely(priv->type != type &&
+			priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+		return -EINVAL;
+
+	if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+		return nb_ops;
+
+	return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param	mempool	crypto operation mempool
+ * @param	type	operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+	struct rte_crypto_op *op = NULL;
+	int retval;
+
+	retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+	if (unlikely(retval != 1))
+		return NULL;
+
+	__rte_crypto_op_reset(op, type);
+
+	return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param	mempool	crypto operation mempool
+ * @param	type	operation type to allocate
+ * @param	ops	Array to place allocated crypto operations
+ * @param	nb_ops	Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns the number of crypto operations allocated (nb_ops)
+ * - On failure returns 0
+ */
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+		enum rte_crypto_op_type type,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	int i;
+
+	if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+			!= nb_ops))
+		return 0;
+
+	for (i = 0; i < nb_ops; i++)
+		__rte_crypto_op_reset(ops[i], type);
+
+	return nb_ops;
+}
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param	op	crypto operation.
+ * @param	size	size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+	uint32_t priv_size;
+
+	if (likely(op->mempool != NULL)) {
+		priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+		if (likely(priv_size >= size))
+			return (void *)((uint8_t *)(op + 1) +
+					sizeof(struct rte_crypto_sym_op));
+	}
+
+	return NULL;
+}
+
+/**
+ * Free a crypto operation structure.
+ * If the operation has been allocated from an rte_mempool, it is returned
+ * to that mempool.
+ *
+ * @param	op	crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+	if (op != NULL && op->mempool != NULL)
+		rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param	m	mbuf which is associated with the crypto operation, the
+ *			operation will be allocated in the private data of that
+ *			mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+	if (unlikely(m == NULL))
+		return NULL;
+
+	/*
+	 * check that the mbuf's private data size is sufficient to contain a
+	 * crypto operation
+	 */
+	if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+			sizeof(struct rte_crypto_sym_op))))
+		return NULL;
+
+	/* private data starts immediately after the mbuf header in the mbuf. */
+	struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+	__rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+	op->mempool = NULL;
+	op->sym->m_src = m;
+
+	return op;
+}
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation.
+ *
+ * @param	op		crypto operation
+ * @param	nb_xforms	number of xforms to allocate and chain
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+	void *priv_data;
+	uint32_t size;
+
+	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+		return NULL;
+
+	size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+	priv_data = __rte_crypto_op_get_priv_data(op, size);
+	if (priv_data == NULL)
+		return NULL;
+
+	return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+			nb_xforms);
+}
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param	op	crypto operation, must be of type symmetric
+ * @param	sess	cryptodev session
+ *
+ * @return 0 on success, -1 if the operation is not of type symmetric
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+		struct rte_cryptodev_sym_session *sess)
+{
+	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+		return -1;
+
+	return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
 
 #ifdef __cplusplus
 }
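
For the session-less path the header above provides everything needed
to build a one-shot transform chain inside the op's private data. A
minimal sketch, assuming a pool (op_pool, illustrative) created with a
priv_size of at least two xforms:

	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	struct rte_crypto_sym_xform *xform =
			rte_crypto_op_sym_xforms_alloc(op, 2);

	if (xform != NULL) {
		/* first xform ciphers, its chained successor authenticates */
		xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	}

Since the chain lives inside the mempool element, rte_crypto_op_free()
reclaims the xforms together with the op.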
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 270510e..831bbf1 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -46,6 +46,8 @@
 extern "C" {
 #endif
 
+#include <string.h>
+
 #include <rte_mbuf.h>
 #include <rte_memory.h>
 #include <rte_mempool.h>
@@ -111,7 +113,6 @@ enum rte_crypto_cipher_operation {
 	/**< Decrypt cipher operation */
 };
 
-
 /**
  * Symmetric Cipher Setup Data.
  *
@@ -128,8 +129,8 @@ struct rte_crypto_cipher_xform {
 	/**< Cipher algorithm */
 
 	struct {
-		uint8_t *data;  /**< pointer to key data */
-		size_t length;  /**< key length in bytes */
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
 	} key;
 	/**< Cipher key
 	 *
@@ -255,8 +256,8 @@ struct rte_crypto_auth_xform {
 	/**< Authentication algorithm selection */
 
 	struct {
-		uint8_t *data;  /**< pointer to key data */
-		size_t length;  /**< key length in bytes */
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
 	} key;
 	/**< Authentication key data.
 	 * The authentication key length MUST be less than or equal to the
@@ -347,21 +348,24 @@ enum rte_crypto_sym_op_sess_type {
 };
 
 
+struct rte_cryptodev_sym_session;
+
 /**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
  *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_sym_enqueue_burst()
- * call for performing cipher, hash, or a combined hash and cipher operations.
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
+ *
+ * When a symmetric crypto operation is enqueued with the device for
+ * processing it must have a valid *rte_mbuf* structure attached via the
+ * m_src field, containing the source data on which the crypto operation
+ * will be performed.
  */
 struct rte_crypto_sym_op {
-	enum rte_crypto_sym_op_sess_type type;
-	enum rte_crypto_op_status status;
+	struct rte_mbuf *m_src;	/**< source mbuf */
+	struct rte_mbuf *m_dst;	/**< destination mbuf */
 
-	struct {
-		struct rte_mbuf *m;	/**< Destination mbuf */
-		uint8_t offset;		/**< Data offset */
-	} dst;
+	enum rte_crypto_sym_op_sess_type type;
 
 	union {
 		struct rte_cryptodev_sym_session *session;
@@ -372,7 +376,7 @@ struct rte_crypto_sym_op {
 
 	struct {
 		struct {
-			 uint32_t offset;
+			uint32_t offset;
 			 /**< Starting point for cipher processing, specified
 			  * as number of bytes from start of data in the source
 			  * buffer. The result of the cipher operation will be
@@ -380,7 +384,7 @@ struct rte_crypto_sym_op {
 			  * this location.
 			  */
 
-			 uint32_t length;
+			uint32_t length;
 			 /**< The message length, in bytes, of the source buffer
 			  * on which the cryptographic operation will be
 			  * computed. This must be a multiple of the block size
@@ -399,17 +403,68 @@ struct rte_crypto_sym_op {
 			  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
 			  * field should be set to 0.
 			  */
-		} to_cipher; /**< Data offsets and length for ciphering */
+		} data; /**< Data offsets and length for ciphering */
+
+		struct {
+			uint8_t *data;
+			/**< Initialisation Vector or Counter.
+			 *
+			 * - For block ciphers in CBC or F8 mode, or for Kasumi
+			 * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+			 * Initialisation Vector (IV) value.
+			 *
+			 * - For block ciphers in CTR mode, this is the counter.
+			 *
+			 * - For GCM mode, this is either the IV (if the length
+			 * is 96 bits) or J0 (for other sizes), where J0 is as
+			 * defined by NIST SP800-38D. Regardless of the IV
+			 * length, a full 16 bytes needs to be allocated.
+			 *
+			 * - For CCM mode, the first byte is reserved, and the
+			 * nonce should be written starting at &iv[1] (to allow
+			 * space for the implementation to write in the flags
+			 * in the first byte). Note that a full 16 bytes should
+			 * be allocated, even though the length field will
+			 * have a value less than this.
+			 *
+			 * - For AES-XTS, this is the 128bit tweak, i, from
+			 * IEEE Std 1619-2007.
+			 *
+			 * For optimum performance, the data pointed to SHOULD
+			 * be 8-byte aligned.
+			 */
+			phys_addr_t phys_addr;
+			uint16_t length;
+			/**< Length of valid IV data.
+			 *
+			 * - For block ciphers in CBC or F8 mode, or for Kasumi
+			 * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+			 * length of the IV (which must be the same as the
+			 * block length of the cipher).
+			 *
+			 * - For block ciphers in CTR mode, this is the length
+			 * of the counter (which must be the same as the block
+			 * length of the cipher).
+			 *
+			 * - For GCM mode, this is either 12 (for 96-bit IVs)
+			 * or 16, in which case data points to J0.
+			 *
+			 * - For CCM mode, this is the length of the nonce,
+			 * which can be in the range 7 to 13 inclusive.
+			 */
+		} iv;	/**< Initialisation vector parameters */
+	} cipher;
 
+	struct {
 		struct {
-			 uint32_t offset;
+			uint32_t offset;
 			 /**< Starting point for hash processing, specified as
 			  * number of bytes from start of packet in source
 			  * buffer.
 			  *
 			  * @note
 			  * For CCM and GCM modes of operation, this field is
-			  * ignored. The field @ref additional_auth field
+			  * ignored. The @ref aad field
 			  * should be set instead.
 			  *
 			  * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -417,180 +472,169 @@ struct rte_crypto_sym_op {
 			  * of the AAD data in the source buffer.
 			  */
 
-			 uint32_t length;
+			uint32_t length;
 			 /**< The message length, in bytes, of the source
 			  * buffer that the hash will be computed on.
 			  *
 			  * @note
 			  * For CCM and GCM modes of operation, this field is
-			  * ignored. The field @ref additional_auth field
-			  * should be set instead.
+			  * ignored. The @ref aad field should be set
+			  * instead.
 			  *
 			  * @note
 			  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
 			  * of operation, this field specifies the length of
 			  * the AAD data in the source buffer.
 			  */
-		} to_hash; /**< Data offsets and length for authentication */
-	} data;	/**< Details of data to be operated on */
+		} data; /**< Data offsets and length for authentication */
 
-	struct {
-		uint8_t *data;
-		/**< Initialisation Vector or Counter.
-		 *
-		 * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
-		 * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
-		 * Vector (IV) value.
-		 *
-		 * - For block ciphers in CTR mode, this is the counter.
-		 *
-		 * - For GCM mode, this is either the IV (if the length is 96
-		 * bits) or J0 (for other sizes), where J0 is as defined by
-		 * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
-		 * needs to be allocated.
-		 *
-		 * - For CCM mode, the first byte is reserved, and the nonce
-		 * should be written starting at &iv[1] (to allow space for the
-		 * implementation to write in the flags in the first byte).
-		 * Note that a full 16 bytes should be allocated, even though
-		 * the length field will have a value less than this.
-		 *
-		 * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
-		 * 1619-2007.
-		 *
-		 * For optimum performance, the data pointed to SHOULD be
-		 * 8-byte aligned.
-		 */
-		phys_addr_t phys_addr;
-		size_t length;
-		/**< Length of valid IV data.
-		 *
-		 * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
-		 * mode, or for SNOW3G in UEA2 mode, this is the length of the
-		 * IV (which must be the same as the block length of the
-		 * cipher).
-		 *
-		 * - For block ciphers in CTR mode, this is the length of the
-		 * counter (which must be the same as the block length of the
-		 * cipher).
-		 *
-		 * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
-		 * which case data points to J0.
-		 *
-		 * - For CCM mode, this is the length of the nonce, which can
-		 * be in the range 7 to 13 inclusive.
-		 */
-	} iv;	/**< Initialisation vector parameters */
-
-	struct {
-		uint8_t *data;
-		/**< If this member of this structure is set this is a
-		 * pointer to the location where the digest result should be
-		 * inserted (in the case of digest generation) or where the
-		 * purported digest exists (in the case of digest
-		 * verification).
-		 *
-		 * At session creation time, the client specified the digest
-		 * result length with the digest_length member of the @ref
-		 * rte_crypto_auth_xform structure. For physical crypto
-		 * devices the caller must allocate at least digest_length of
-		 * physically contiguous memory at this location.
-		 *
-		 * For digest generation, the digest result will overwrite
-		 * any data at this location.
-		 *
-		 * @note
-		 * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
-		 * "digest result" read "authentication tag T".
-		 *
-		 * If this member is not set the digest result is understood
-		 * to be in the destination buffer for digest generation, and
-		 * in the source buffer for digest verification. The location
-		 * of the digest result in this case is immediately following
-		 * the region over which the digest is computed.
-		 */
-		phys_addr_t phys_addr;	/**< Physical address of digest */
-		uint32_t length;	/**< Length of digest */
-	} digest; /**< Digest parameters */
+		struct {
+			uint8_t *data;
+			/**< If this member of this structure is set this is a
+			 * pointer to the location where the digest result
+			 * should be inserted (in the case of digest generation)
+			 * or where the purported digest exists (in the case of
+			 * digest verification).
+			 *
+			 * At session creation time, the client specified the
+			 * digest result length with the digest_length member
+			 * of the @ref rte_crypto_auth_xform structure. For
+			 * physical crypto devices the caller must allocate at
+			 * least digest_length of physically contiguous memory
+			 * at this location.
+			 *
+			 * For digest generation, the digest result will
+			 * overwrite any data at this location.
+			 *
+			 * @note
+			 * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+			 * "digest result" read "authentication tag T".
+			 *
+			 * If this member is not set the digest result is
+			 * understood to be in the destination buffer for
+			 * digest generation, and in the source buffer for
+			 * digest verification. The location of the digest
+			 * result in this case is immediately following the
+			 * region over which the digest is computed.
+			 */
+			phys_addr_t phys_addr;
+			/**< Physical address of digest */
+			uint16_t length;
+			/**< Length of digest */
+		} digest; /**< Digest parameters */
 
-	struct {
-		uint8_t *data;
-		/**< Pointer to Additional Authenticated Data (AAD) needed for
-		 * authenticated cipher mechanisms (CCM and GCM), and to the IV
-		 * for SNOW3G authentication
-		 * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
-		 * authentication mechanisms this pointer is ignored.
-		 *
-		 * The length of the data pointed to by this field is set up
-		 * for the session in the @ref rte_crypto_auth_xform structure
-		 * as part of the @ref rte_cryptodev_sym_session_create function
-		 * call.  This length must not exceed 240 bytes.
-		 *
-		 * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
-		 * caller should setup this field as follows:
-		 *
-		 * - the nonce should be written starting at an offset of one
-		 *   byte into the array, leaving room for the implementation
-		 *   to write in the flags to the first byte.
-		 *
-		 * - the additional  authentication data itself should be
-		 *   written starting at an offset of 18 bytes into the array,
-		 *   leaving room for the length encoding in the first two
-		 *   bytes of the second block.
-		 *
-		 * - the array should be big enough to hold the above fields,
-		 *   plus any padding to round this up to the nearest multiple
-		 *   of the block size (16 bytes).  Padding will be added by
-		 *   the implementation.
-		 *
-		 * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
-		 * caller should setup this field as follows:
-		 *
-		 * - the AAD is written in starting at byte 0
-		 * - the array must be big enough to hold the AAD, plus any
-		 *   space to round this up to the nearest multiple of the
-		 *   block size (16 bytes).
-		 *
-		 * @note
-		 * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
-		 * operation, this field is not used and should be set to 0.
-		 * Instead the AAD data should be placed in the source buffer.
-		 */
-		phys_addr_t phys_addr;	/**< physical address */
-		uint32_t length;	/**< Length of digest */
-	} additional_auth;
-	/**< Additional authentication parameters */
-
-	struct rte_mempool *pool;
-	/**< mempool used to allocate crypto op */
-
-	void *user_data;
-	/**< opaque pointer for user data */
-};
+		struct {
+			uint8_t *data;
+			/**< Pointer to Additional Authenticated Data (AAD)
+			 * needed for authenticated cipher mechanisms (CCM and
+			 * GCM), and to the IV for SNOW3G authentication
+			 * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+			 * authentication mechanisms this pointer is ignored.
+			 *
+			 * The length of the data pointed to by this field is
+			 * set up for the session in the @ref
+			 * rte_crypto_auth_xform structure as part of the @ref
+			 * rte_cryptodev_sym_session_create function call. This
+			 * length must not exceed 240 bytes.
+			 *
+			 * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+			 * the caller should setup this field as follows:
+			 *
+			 * - the nonce should be written starting at an offset
+			 * of one byte into the array, leaving room for the
+			 * implementation to write in the flags to the first
+			 * byte.
+			 *
+			 * - the additional authentication data itself should
+			 * be written starting at an offset of 18 bytes into
+			 * the array, leaving room for the length encoding in
+			 * the first two bytes of the second block.
+			 *
+			 * - the array should be big enough to hold the above
+			 * fields, plus any padding to round this up to the
+			 * nearest multiple of the block size (16 bytes).
+			 * Padding will be added by the implementation.
+			 *
+			 * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+			 * caller should setup this field as follows:
+			 *
+			 * - the AAD is written in starting at byte 0
+			 * - the array must be big enough to hold the AAD, plus
+			 * any space to round this up to the nearest multiple
+			 * of the block size (16 bytes).
+			 *
+			 * @note
+			 * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+			 * operation, this field is not used and should be set
+			 * to 0. Instead the AAD data should be placed in the
+			 * source buffer.
+			 */
+			phys_addr_t phys_addr;	/**< physical address */
+			uint16_t length;	/**< Length of AAD data in bytes */
+		} aad;
+		/**< Additional authentication parameters */
+	} auth;
+} __rte_cache_aligned;
 
 
 /**
- * Reset the fields of a crypto operation to their default values.
+ * Reset the fields of a symmetric operation to their default values.
  *
  * @param	op	The crypto operation to be reset.
  */
 static inline void
 __rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
 {
+	memset(op, 0, sizeof(*op));
+
 	op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
-	op->dst.m = NULL;
-	op->dst.offset = 0;
 }
 
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation.
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+		void *priv_data, uint8_t nb_xforms)
+{
+	struct rte_crypto_sym_xform *xform;
+
+	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
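+	/*
+	 * walk the contiguous xform array, pointing each xform's next at
+	 * its neighbour and terminating the chain with NULL
+	 */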
+	do {
+		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+	} while (xform);
+
+	return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param	op	crypto operation
+ * @param	sess	cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
 		struct rte_cryptodev_sym_session *sess)
 {
-	op->session = sess;
-	op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+	sym_op->session = sess;
+	sym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+	return 0;
 }
 
+
 #ifdef __cplusplus
 }
 #endif
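
With the fields regrouped per sub-operation, populating an op for a
cipher-then-auth chain reads as below; a minimal sketch in which
offset, len, iv, iv_phys and digest_len are illustrative placeholders:

	struct rte_crypto_sym_op *sym = op->sym;

	sym->m_src = m;			/* in-place: m_dst left NULL */

	sym->cipher.data.offset = offset;
	sym->cipher.data.length = len;
	sym->cipher.iv.data = iv;
	sym->cipher.iv.phys_addr = iv_phys;
	sym->cipher.iv.length = 16;	/* cipher block size, e.g. AES-CBC */

	sym->auth.data.offset = offset;
	sym->auth.data.length = len;
	sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m, digest_len);
	sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
			rte_pktmbuf_pkt_len(m) - digest_len);
	sym->auth.digest.length = digest_len;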
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index c7fef6a..4632ca3 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1056,3 +1056,79 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
 
 	return NULL;
 }
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+		void *opaque_arg,
+		void *_op_data,
+		__rte_unused unsigned i)
+{
+	struct rte_crypto_op *op = _op_data;
+	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+	memset(_op_data, 0, mempool->elt_size);
+
+	__rte_crypto_op_reset(op, type);
+
+	op->phys_addr = rte_mem_virt2phy(_op_data);
+	op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+		int socket_id)
+{
+	struct rte_crypto_op_pool_private *priv;
+
+	unsigned elt_size = sizeof(struct rte_crypto_op) +
+			sizeof(struct rte_crypto_sym_op) +
+			priv_size;
+
+	/* lookup mempool in case already allocated */
+	struct rte_mempool *mp = rte_mempool_lookup(name);
+
+	if (mp != NULL) {
+		priv = (struct rte_crypto_op_pool_private *)
+				rte_mempool_get_priv(mp);
+
+		if (mp->elt_size != elt_size ||
+				mp->cache_size < cache_size ||
+				mp->size < nb_elts ||
+				priv->priv_size < priv_size) {
+			mp = NULL;
+			CDEV_LOG_ERR("Mempool %s already exists but with "
+					"incompatible parameters", name);
+			return NULL;
+		}
+		return mp;
+	}
+
+	mp = rte_mempool_create(
+			name,
+			nb_elts,
+			elt_size,
+			cache_size,
+			sizeof(struct rte_crypto_op_pool_private),
+			NULL,
+			NULL,
+			rte_crypto_op_init,
+			&type,
+			socket_id,
+			0);
+
+	if (mp == NULL) {
+		CDEV_LOG_ERR("Failed to create mempool %s", name);
+		return NULL;
+	}
+
+	priv = (struct rte_crypto_op_pool_private *)
+			rte_mempool_get_priv(mp);
+
+	priv->priv_size = priv_size;
+	priv->type = type;
+
+	return mp;
+}
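A usage sketch for the new pool API: create a pool of symmetric operations
and draw one operation from it. rte_crypto_op_alloc() is assumed from the
rte_crypto.h additions in this patch, and the pool sizing values are
arbitrary:

    #include <rte_crypto.h>
    #include <rte_lcore.h>

    /* Sketch: create an op pool and allocate one operation from it. */
    static struct rte_crypto_op *
    get_one_op(void)
    {
            struct rte_mempool *op_pool = rte_crypto_op_pool_create(
                            "crypto_op_pool",
                            RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                            8192,           /* number of elements */
                            128,            /* per-lcore cache size */
                            0,              /* no extra private data */
                            rte_socket_id());

            if (op_pool == NULL)
                    return NULL;

            return rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
    }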
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index f4b38c1..aab8cff 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -40,16 +40,14 @@
  * Defines RTE Crypto Device APIs for the provisioning of cipher and
  * authentication operations.
  *
- * @warning
  * @b EXPERIMENTAL: this API may change without prior notice
+ *
  */
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#include "stddef.h"
-
 #include "rte_crypto.h"
 #include "rte_dev.h"
 
@@ -67,6 +65,9 @@ enum rte_cryptodev_type {
 	RTE_CRYPTODEV_QAT_SYM_PMD,	/**< QAT PMD Symmetric Crypto */
 };
 
+
+extern const char **rte_cyptodev_names;
+
 /* Logging Macros */
 
 #define CDEV_LOG_ERR(fmt, args...)					\
@@ -214,8 +215,6 @@ struct rte_cryptodev_config {
 /**
  * Configure a device.
  *
- * EXPERIMENTAL: this API file may change without prior notice
- *
  * This function must be invoked first before any other function in the
  * API. This function can also be re-invoked when a device is in the
  * stopped state.
@@ -411,12 +410,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
 
 
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
-		uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+		struct rte_crypto_op **ops, uint16_t nb_ops);
 /**< Dequeue processed packets from queue pair of a device. */
 
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
-		uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+		struct rte_crypto_op **ops, uint16_t nb_ops);
 /**< Enqueue packets for processing on queue pair of a device. */
 
 
@@ -489,66 +488,65 @@ struct rte_cryptodev_data {
 extern struct rte_cryptodev *rte_cryptodevs;
 /**
  *
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operations are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
  *
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
  *
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the device's output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
  *
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
  * notification to avoid the corresponding overhead.
  *
- * @param	dev_id		The identifier of the device.
+ * @param	dev_id		The symmetric crypto device identifier
  * @param	qp_id		The index of the queue pair from which to
  *				retrieve processed packets. The value must be
  *				in the range [0, nb_queue_pair - 1] previously
  *				supplied to rte_cryptodev_configure().
- * @param	pkts		The address of an array of pointers to
- *				*rte_mbuf* structures that must be large enough
- *				to store *nb_pkts* pointers in it.
- * @param	nb_pkts		The maximum number of packets to dequeue.
+ * @param	ops		The address of an array of pointers to
+ *				*rte_crypto_op* structures that must be
+ *				large enough to store *nb_ops* pointers in it.
+ * @param	nb_ops		The maximum number of operations to dequeue.
  *
  * @return
- *   - The number of packets actually dequeued, which is the number
- *   of pointers to *rte_mbuf* structures effectively supplied to the
- *   *pkts* array.
+ *   - The number of operations actually dequeued, which is the number
+ *   of pointers to *rte_crypto_op* structures effectively supplied to the
+ *   *ops* array.
  */
 static inline uint16_t
 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
-		struct rte_mbuf **pkts, uint16_t nb_pkts)
+		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
 
-	nb_pkts = (*dev->dequeue_burst)
-			(dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+	nb_ops = (*dev->dequeue_burst)
+			(dev->data->queue_pairs[qp_id], ops, nb_ops);
 
-	return nb_pkts;
+	return nb_ops;
 }
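The "retrieve as many processed operations as possible" policy described
above reduces to a simple loop; a sketch, with the burst size chosen
arbitrarily:

    #include <rte_cryptodev.h>

    /* Sketch: keep dequeuing while full bursts are returned. */
    static void
    drain_queue(uint8_t dev_id, uint16_t qp_id)
    {
            struct rte_crypto_op *ops[32];
            uint16_t nb_deq;

            do {
                    nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
                                    ops, 32);
                    /* ... hand nb_deq completed operations back here ... */
            } while (nb_deq == 32);
    }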
 
 /**
- * Enqueue a burst of packets for processing on a crypto device.
- *
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * Enqueue a burst of operations for processing on a crypto device.
  *
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
  *
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
  *
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
  *
  * @param	dev_id		The identifier of the device.
  * @param	qp_id		The index of the queue pair which packets are
@@ -556,25 +554,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
  *				must be in the range [0, nb_queue_pairs - 1]
  *				previously supplied to
  *				 *rte_cryptodev_configure*.
- * @param	pkts		The address of an array of *nb_pkts* pointers
- *				to *rte_mbuf* structures which contain the
- *				output packets.
- * @param	nb_pkts		The number of packets to transmit.
+ * @param	ops		The address of an array of *nb_ops* pointers
+ *				to *rte_crypto_op* structures which contain
+ *				the crypto operations to be processed.
+ * @param	nb_ops		The number of operations to process.
  *
  * @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto device's queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
  */
 static inline uint16_t
 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
-		struct rte_mbuf **pkts, uint16_t nb_pkts)
+		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
 
 	return (*dev->enqueue_burst)(
-			dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+			dev->data->queue_pairs[qp_id], ops, nb_ops);
 }
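Since the return value may be less than *nb_ops* when the device queue is
full, callers typically retry the unsent tail; a minimal sketch (a real
application would bound the retries or back off):

    #include <rte_cryptodev.h>

    /* Sketch: retry the tail of a burst that did not fit in the queue. */
    static void
    enqueue_all(uint8_t dev_id, uint16_t qp_id,
                struct rte_crypto_op **ops, uint16_t nb_ops)
    {
            uint16_t sent = 0;

            while (sent < nb_ops)
                    sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
                                    &ops[sent], nb_ops - sent);
    }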
 
 
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index a46af6f..b682184 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -1,4 +1,4 @@
-DPDK_2.2 {
+DPDK_16.04 {
 	global:
 
 	rte_cryptodevs;
@@ -27,6 +27,7 @@ DPDK_2.2 {
 	rte_cryptodev_queue_pair_setup;
 	rte_cryptodev_queue_pair_start;
 	rte_cryptodev_queue_pair_stop;
+	rte_crypto_op_pool_create;
 
 	local: *;
-};
+};
\ No newline at end of file
-- 
2.1.0

Thread overview: 62+ messages
2016-01-30 13:07 [dpdk-dev] [PATCH] cryptodev: API change to rte_crypto_op bursts Declan Doherty
2016-02-08 17:50 ` Trahe, Fiona
2016-02-19 11:01 ` [dpdk-dev] [PATCH v2 0/2] cryptodev API changes Declan Doherty
2016-02-19 11:01   ` [dpdk-dev] [PATCH v2 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
2016-02-19 11:01   ` [dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
2016-02-22 11:17     ` Trahe, Fiona
2016-02-22 18:23     ` Trahe, Fiona
2016-02-22 18:56     ` Trahe, Fiona
2016-02-26 17:30   ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
2016-02-26 17:30     ` [dpdk-dev] [PATCH v3 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
2016-02-26 17:30     ` [dpdk-dev] [PATCH v3 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
2016-02-29 16:00     ` [dpdk-dev] [PATCH v3 0/2] cryptodev API changes Declan Doherty
2016-02-29 16:52     ` [dpdk-dev] [PATCH v4 " Declan Doherty
2016-02-29 16:52       ` [dpdk-dev] [PATCH v4 1/2] cryptodev: API tidy and changes to support future extensions Declan Doherty
2016-03-04 14:43         ` Thomas Monjalon
2016-02-29 16:52       ` [dpdk-dev] [PATCH v4 2/2] cryptodev: change burst API to be crypto op oriented Declan Doherty
2016-02-29 17:47       ` [dpdk-dev] [PATCH v4 0/2] cryptodev API changes Trahe, Fiona
2016-03-04 17:17       ` [dpdk-dev] [PATCH v5 " Fiona Trahe
2016-03-04 17:38         ` Thomas Monjalon
2016-03-04 17:43           ` Trahe, Fiona
2016-03-04 17:45             ` Thomas Monjalon
2016-03-04 18:01               ` Trahe, Fiona
2016-03-04 17:39         ` Trahe, Fiona
2016-03-15  6:48         ` Cao, Min
2016-03-04 17:17       ` [dpdk-dev] [PATCH v5 1/2] This patch splits symmetric specific definitions and functions away from the common crypto APIs to facilitate the future extension and expansion of the cryptodev framework, in order to allow asymmetric crypto operations to be introduced at a later date, as well as to clean the logical structure of the public includes. The patch also introduces the _sym prefix to symmetric specific structure and functions to improve clarity in the API Fiona Trahe
2016-03-04 17:17       ` [dpdk-dev] [PATCH v5 2/2] This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts rte_crypto_op's rather than the current implementation which operates on rte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the use of crypto operations in general Fiona Trahe
2016-03-04 18:29       ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Fiona Trahe
2016-03-07 11:50         ` [dpdk-dev] [PATCH v7 " Fiona Trahe
2016-03-07 13:23           ` De Lara Guarch, Pablo
2016-03-07 13:53           ` Jain, Deepak K
2016-03-10 13:42           ` [dpdk-dev] [PATCH v8 0/5] " Fiona Trahe
2016-03-10 14:05             ` De Lara Guarch, Pablo
2016-03-10 15:41             ` [dpdk-dev] [PATCH v9 " Fiona Trahe
2016-03-10 16:14               ` Thomas Monjalon
2016-03-10 15:41             ` [dpdk-dev] [PATCH v9 1/5] cryptodev: code cleanup Fiona Trahe
2016-03-10 15:41             ` [dpdk-dev] [PATCH v9 2/5] cryptodev: refactor to partition common from symmetric-specific code Fiona Trahe
2016-03-10 15:41             ` [dpdk-dev] [PATCH v9 3/5] cryptodev: remove unused phys_addr field from key Fiona Trahe
2016-03-10 15:41             ` [dpdk-dev] [PATCH v9 4/5] cryptodev: change burst API to be crypto op oriented Fiona Trahe
2016-03-10 16:03               ` Thomas Monjalon
2016-03-10 16:13                 ` Trahe, Fiona
2016-03-10 15:41             ` [dpdk-dev] [PATCH v9 5/5] mbuf_offload: remove library Fiona Trahe
2016-03-14  8:59             ` [dpdk-dev] [PATCH v8 0/5] cryptodev API changes Cao, Min
2016-03-10 13:42           ` [dpdk-dev] [PATCH v8 1/5] cryptodev: code cleanup Fiona Trahe
2016-03-10 13:42           ` [dpdk-dev] [PATCH v8 2/5] cryptodev: refactor to partition common from symmetric-specific code Fiona Trahe
2016-03-10 13:42           ` [dpdk-dev] [PATCH v8 3/5] cryptodev: remove unused phys_addr field from key Fiona Trahe
2016-03-10 13:43           ` Fiona Trahe [this message]
2016-03-10 14:03             ` [dpdk-dev] [PATCH v8 4/5] cryptodev: change burst API to be crypto op oriented Thomas Monjalon
2016-03-10 13:43           ` [dpdk-dev] [PATCH v8 5/5] mbuf_offload: remove library Fiona Trahe
2016-03-15  5:21           ` [dpdk-dev] [PATCH v7 0/2] cryptodev API changes Cao, Min
2016-03-07 11:50         ` [dpdk-dev] [PATCH v7 1/2] cryptodev: API tidy and changes to support future extensions Fiona Trahe
2016-03-08 14:10           ` Thomas Monjalon
2016-03-10 10:30             ` Trahe, Fiona
2016-03-07 11:50         ` [dpdk-dev] [PATCH v7 2/2] cryptodev: change burst API to be crypto op oriented Fiona Trahe
2016-03-08 14:32           ` Thomas Monjalon
2016-03-09 12:55             ` Trahe, Fiona
2016-03-10 10:28               ` Trahe, Fiona
2016-03-15  6:46         ` [dpdk-dev] [PATCH v6 0/2] cryptodev API changes Cao, Min
2016-03-04 18:29       ` [dpdk-dev] [PATCH v6 1/2] cryptodev: API tidy and changes to support future extensions Fiona Trahe
2016-03-04 18:29       ` [dpdk-dev] [PATCH v6 2/2] cryptodev: change burst API to be crypto op oriented Fiona Trahe
2016-03-15  6:57       ` [dpdk-dev] [PATCH v4 0/2] cryptodev API changes Cao, Min
2016-03-15  7:07     ` [dpdk-dev] [PATCH v3 " Cao, Min
2016-03-15  7:48   ` [dpdk-dev] [PATCH v2 " Cao, Min
