DPDK patches and discussions
* [dpdk-dev] [PATCH] crypto/aesni_mb: use architecture independent macros
@ 2018-11-23 14:04 Fan Zhang
  2018-12-11 12:29 ` [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architecture independent macros Fan Zhang
  0 siblings, 1 reply; 15+ messages in thread
From: Fan Zhang @ 2018-11-23 14:04 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal

This patch updates the aesni_mb PMD to use the IMB_* architecture-independent
macros, reducing the code size and future maintenance effort.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
This patch targets the 19.02 release
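
For context: today the PMD dispatches into intel-ipsec-mb through a
per-architecture function pointer table (job_ops[vector_mode]); after this
patch the library's arch-independent macros do that dispatch internally.
A minimal before/after sketch, illustrative only, based on the hunks below:

    /* before: vector-mode specific table selected at queue pair setup */
    job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
    job = (*qp->op_fns->job.submit)(qp->mb_mgr);

    /* after: the MB_MGR instance, initialised once per device with
     * init_mb_mgr_sse/avx/avx2/avx512, carries the arch specifics */
    job = IMB_GET_NEXT_JOB(qp->mb_mgr);
    job = IMB_SUBMIT_JOB(qp->mb_mgr);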

 drivers/crypto/aesni_mb/aesni_mb_ops.h             | 302 ---------------------
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         | 160 ++++++-----
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |  15 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |  33 ++-
 4 files changed, 118 insertions(+), 392 deletions(-)
 delete mode 100644 drivers/crypto/aesni_mb/aesni_mb_ops.h

diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
deleted file mode 100644
index 575d6a5b8..000000000
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 Intel Corporation
- */
-
-#ifndef _AESNI_MB_OPS_H_
-#define _AESNI_MB_OPS_H_
-
-#ifndef LINUX
-#define LINUX
-#endif
-
-#include <intel-ipsec-mb.h>
-
-/*
- * The IMB_VERSION_NUM macro was introduced in Multi-buffer version 0.50,
- * so if the macro is not defined, the version is 0.49.
- */
-#if !defined(IMB_VERSION_NUM)
-#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
-#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
-#endif
-
-enum aesni_mb_vector_mode {
-	RTE_AESNI_MB_NOT_SUPPORTED = 0,
-	RTE_AESNI_MB_SSE,
-	RTE_AESNI_MB_AVX,
-	RTE_AESNI_MB_AVX2,
-	RTE_AESNI_MB_AVX512
-};
-
-typedef void (*md5_one_block_t)(const void *data, void *digest);
-
-typedef void (*sha1_one_block_t)(const void *data, void *digest);
-typedef void (*sha224_one_block_t)(const void *data, void *digest);
-typedef void (*sha256_one_block_t)(const void *data, void *digest);
-typedef void (*sha384_one_block_t)(const void *data, void *digest);
-typedef void (*sha512_one_block_t)(const void *data, void *digest);
-
-typedef void (*aes_keyexp_128_t)
-		(const void *key, void *enc_exp_keys, void *dec_exp_keys);
-typedef void (*aes_keyexp_192_t)
-		(const void *key, void *enc_exp_keys, void *dec_exp_keys);
-typedef void (*aes_keyexp_256_t)
-		(const void *key, void *enc_exp_keys, void *dec_exp_keys);
-typedef void (*aes_xcbc_expand_key_t)
-		(const void *key, void *exp_k1, void *k2, void *k3);
-typedef void (*aes_cmac_sub_key_gen_t)
-		(const void *exp_key, void *k2, void *k3);
-typedef void (*aes_cmac_keyexp_t)
-		(const void *key, void *keyexp);
-typedef void (*aes_gcm_keyexp_t)
-		(const void *key, struct gcm_key_data *keyexp);
-
-/** Multi-buffer library function pointer table */
-struct aesni_mb_op_fns {
-	struct {
-		init_mb_mgr_t init_mgr;
-		/**< Initialise scheduler  */
-		get_next_job_t get_next;
-		/**< Get next free job structure */
-		submit_job_t submit;
-		/**< Submit job to scheduler */
-		get_completed_job_t get_completed_job;
-		/**< Get completed job */
-		flush_job_t flush_job;
-		/**< flush jobs from manager */
-	} job;
-	/**< multi buffer manager functions */
-
-	struct {
-		struct {
-			md5_one_block_t md5;
-			/**< MD5 one block hash */
-			sha1_one_block_t sha1;
-			/**< SHA1 one block hash */
-			sha224_one_block_t sha224;
-			/**< SHA224 one block hash */
-			sha256_one_block_t sha256;
-			/**< SHA256 one block hash */
-			sha384_one_block_t sha384;
-			/**< SHA384 one block hash */
-			sha512_one_block_t sha512;
-			/**< SHA512 one block hash */
-		} one_block;
-		/**< one block hash functions */
-
-		struct {
-			aes_keyexp_128_t aes128;
-			/**< AES128 key expansions */
-			aes_keyexp_192_t aes192;
-			/**< AES192 key expansions */
-			aes_keyexp_256_t aes256;
-			/**< AES256 key expansions */
-			aes_xcbc_expand_key_t aes_xcbc;
-			/**< AES XCBC key expansions */
-			aes_cmac_sub_key_gen_t aes_cmac_subkey;
-			/**< AES CMAC subkey expansions */
-			aes_cmac_keyexp_t aes_cmac_expkey;
-			/**< AES CMAC key expansions */
-			aes_gcm_keyexp_t aes_gcm_128;
-			/**< AES GCM 128 key expansions */
-			aes_gcm_keyexp_t aes_gcm_192;
-			/**< AES GCM 192 key expansions */
-			aes_gcm_keyexp_t aes_gcm_256;
-			/**< AES GCM 256 key expansions */
-		} keyexp;
-		/**< Key expansion functions */
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-		struct {
-			hash_fn_t sha1;
-			hash_fn_t sha224;
-			hash_fn_t sha256;
-			hash_fn_t sha384;
-			hash_fn_t sha512;
-		} multi_block;
-		/** multi block hash functions */
-#endif
-	} aux;
-	/**< Auxiliary functions */
-};
-
-
-static const struct aesni_mb_op_fns job_ops[] = {
-		[RTE_AESNI_MB_NOT_SUPPORTED] = {
-			.job = {
-				NULL
-			},
-			.aux = {
-				.one_block = {
-					NULL
-				},
-				.keyexp = {
-					NULL
-				},
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-				.multi_block = {
-					NULL
-				}
-#endif
-
-			}
-		},
-		[RTE_AESNI_MB_SSE] = {
-			.job = {
-				init_mb_mgr_sse,
-				get_next_job_sse,
-				submit_job_sse,
-				get_completed_job_sse,
-				flush_job_sse
-			},
-			.aux = {
-				.one_block = {
-					md5_one_block_sse,
-					sha1_one_block_sse,
-					sha224_one_block_sse,
-					sha256_one_block_sse,
-					sha384_one_block_sse,
-					sha512_one_block_sse
-				},
-				.keyexp = {
-					aes_keyexp_128_sse,
-					aes_keyexp_192_sse,
-					aes_keyexp_256_sse,
-					aes_xcbc_expand_key_sse,
-					aes_cmac_subkey_gen_sse,
-					aes_keyexp_128_enc_sse,
-					aes_gcm_pre_128_sse,
-					aes_gcm_pre_192_sse,
-					aes_gcm_pre_256_sse
-				},
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-				.multi_block = {
-					sha1_sse,
-					sha224_sse,
-					sha256_sse,
-					sha384_sse,
-					sha512_sse
-				}
-#endif
-			}
-		},
-		[RTE_AESNI_MB_AVX] = {
-			.job = {
-				init_mb_mgr_avx,
-				get_next_job_avx,
-				submit_job_avx,
-				get_completed_job_avx,
-				flush_job_avx
-			},
-			.aux = {
-				.one_block = {
-					md5_one_block_avx,
-					sha1_one_block_avx,
-					sha224_one_block_avx,
-					sha256_one_block_avx,
-					sha384_one_block_avx,
-					sha512_one_block_avx
-				},
-				.keyexp = {
-					aes_keyexp_128_avx,
-					aes_keyexp_192_avx,
-					aes_keyexp_256_avx,
-					aes_xcbc_expand_key_avx,
-					aes_cmac_subkey_gen_avx,
-					aes_keyexp_128_enc_avx,
-					aes_gcm_pre_128_avx_gen2,
-					aes_gcm_pre_192_avx_gen2,
-					aes_gcm_pre_256_avx_gen2
-				},
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-				.multi_block = {
-					sha1_avx,
-					sha224_avx,
-					sha256_avx,
-					sha384_avx,
-					sha512_avx
-				}
-#endif
-			}
-		},
-		[RTE_AESNI_MB_AVX2] = {
-			.job = {
-				init_mb_mgr_avx2,
-				get_next_job_avx2,
-				submit_job_avx2,
-				get_completed_job_avx2,
-				flush_job_avx2
-			},
-			.aux = {
-				.one_block = {
-					md5_one_block_avx2,
-					sha1_one_block_avx2,
-					sha224_one_block_avx2,
-					sha256_one_block_avx2,
-					sha384_one_block_avx2,
-					sha512_one_block_avx2
-				},
-				.keyexp = {
-					aes_keyexp_128_avx2,
-					aes_keyexp_192_avx2,
-					aes_keyexp_256_avx2,
-					aes_xcbc_expand_key_avx2,
-					aes_cmac_subkey_gen_avx2,
-					aes_keyexp_128_enc_avx2,
-					aes_gcm_pre_128_avx_gen4,
-					aes_gcm_pre_192_avx_gen4,
-					aes_gcm_pre_256_avx_gen4
-				},
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-				.multi_block = {
-					sha1_avx2,
-					sha224_avx2,
-					sha256_avx2,
-					sha384_avx2,
-					sha512_avx2
-				}
-#endif
-			}
-		},
-		[RTE_AESNI_MB_AVX512] = {
-			.job = {
-				init_mb_mgr_avx512,
-				get_next_job_avx512,
-				submit_job_avx512,
-				get_completed_job_avx512,
-				flush_job_avx512
-			},
-			.aux = {
-				.one_block = {
-					md5_one_block_avx512,
-					sha1_one_block_avx512,
-					sha224_one_block_avx512,
-					sha256_one_block_avx512,
-					sha384_one_block_avx512,
-					sha512_one_block_avx512
-				},
-				.keyexp = {
-					aes_keyexp_128_avx512,
-					aes_keyexp_192_avx512,
-					aes_keyexp_256_avx512,
-					aes_xcbc_expand_key_avx512,
-					aes_cmac_subkey_gen_avx512,
-					aes_keyexp_128_enc_avx512,
-					aes_gcm_pre_128_avx_gen4,
-					aes_gcm_pre_192_avx_gen4,
-					aes_gcm_pre_256_avx_gen4
-				},
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-				.multi_block = {
-					sha1_avx512,
-					sha224_avx512,
-					sha256_avx512,
-					sha384_avx512,
-					sha512_avx512
-				}
-#endif
-			}
-		}
-};
-
-
-#endif /* _AESNI_MB_OPS_H_ */
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 83250e32c..0f593b2a3 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -100,7 +100,7 @@ aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
 
 /** Set session authentication parameters */
 static int
-aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
@@ -135,13 +135,16 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 			return -EINVAL;
 		}
 		sess->auth.gen_digest_len = sess->auth.req_digest_len;
-		(*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
+
+		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
 				sess->auth.xcbc.k1_expanded,
 				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
 		return 0;
 	}
 
 	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+		uint32_t dust[4*15];
+
 		sess->auth.algo = AES_CMAC;
 
 		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
@@ -169,10 +172,10 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 			sess->auth.gen_digest_len = cmac_digest_len;
 		else
 			sess->auth.gen_digest_len = sess->auth.req_digest_len;
-		(*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
-				sess->auth.cmac.expkey);
 
-		(*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey,
+		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
+				sess->auth.cmac.expkey, dust);
+		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
 				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
 		return 0;
 	}
@@ -180,14 +183,14 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 	switch (xform->auth.algo) {
 	case RTE_CRYPTO_AUTH_MD5_HMAC:
 		sess->auth.algo = MD5;
-		hash_oneblock_fn = mb_ops->aux.one_block.md5;
+		hash_oneblock_fn = mb_mgr->md5_one_block;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		sess->auth.algo = SHA1;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha1;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha1_one_block;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
-			mb_ops->aux.multi_block.sha1(
+			IMB_SHA1(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
@@ -197,10 +200,10 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 		sess->auth.algo = SHA_224;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha224;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha224_one_block;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
-			mb_ops->aux.multi_block.sha224(
+			IMB_SHA224(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
@@ -210,10 +213,10 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
 		sess->auth.algo = SHA_256;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha256;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha256_one_block;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
-			mb_ops->aux.multi_block.sha256(
+			IMB_SHA256(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
@@ -223,10 +226,10 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
 		sess->auth.algo = SHA_384;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha384;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha384_one_block;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
-			mb_ops->aux.multi_block.sha384(
+			IMB_SHA384(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
@@ -236,10 +239,10 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
 		sess->auth.algo = SHA_512;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha512;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha512_one_block;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
-			mb_ops->aux.multi_block.sha512(
+			IMB_SHA512(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
@@ -292,13 +295,12 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 
 /** Set session cipher parameters */
 static int
-aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
 	uint8_t is_aes = 0;
 	uint8_t is_3DES = 0;
-	aes_keyexp_t aes_keyexp_fn;
 
 	if (xform == NULL) {
 		sess->cipher.mode = NULL_CIPHER;
@@ -361,26 +363,26 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 		switch (xform->cipher.key.length) {
 		case AES_128_BYTES:
 			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
 			break;
 		case AES_192_BYTES:
 			sess->cipher.key_length_in_bytes = AES_192_BYTES;
-			aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
 			break;
 		case AES_256_BYTES:
 			sess->cipher.key_length_in_bytes = AES_256_BYTES;
-			aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
 			break;
 		default:
 			AESNI_MB_LOG(ERR, "Invalid cipher key length");
 			return -EINVAL;
 		}
-
-		/* Expanded cipher keys */
-		(*aes_keyexp_fn)(xform->cipher.key.data,
-				sess->cipher.expanded_aes_keys.encode,
-				sess->cipher.expanded_aes_keys.decode);
-
 	} else if (is_3DES) {
 		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
 				sess->cipher.exp_3des_keys.key[1],
@@ -388,9 +390,12 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 
 		switch (xform->cipher.key.length) {
 		case  24:
-			des_key_schedule(keys[0], xform->cipher.key.data);
-			des_key_schedule(keys[1], xform->cipher.key.data+8);
-			des_key_schedule(keys[2], xform->cipher.key.data+16);
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			IMB_DES_KEYSCHED(mb_mgr, keys[2],
+					xform->cipher.key.data + 16);
 
 			/* Initialize keys - 24 bytes: [K1-K2-K3] */
 			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
@@ -398,16 +403,18 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
 			break;
 		case 16:
-			des_key_schedule(keys[0], xform->cipher.key.data);
-			des_key_schedule(keys[1], xform->cipher.key.data+8);
-
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
 			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
 			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
 			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
 			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
 			break;
 		case 8:
-			des_key_schedule(keys[0], xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
 
 			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
 			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
@@ -431,9 +438,11 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 		}
 		sess->cipher.key_length_in_bytes = 8;
 
-		des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.encode,
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
 				xform->cipher.key.data);
-		des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.decode,
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
 				xform->cipher.key.data);
 	}
 
@@ -441,15 +450,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 }
 
 static int
-aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
-	union {
-		aes_keyexp_t aes_keyexp_fn;
-		aes_gcm_keyexp_t aes_gcm_keyexp_fn;
-	} keyexp;
-
 	switch (xform->aead.op) {
 	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
 		sess->cipher.direction = ENCRYPT;
@@ -473,17 +477,15 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
 		switch (xform->aead.key.length) {
 		case AES_128_BYTES:
 			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			keyexp.aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
 			break;
 		default:
 			AESNI_MB_LOG(ERR, "Invalid cipher key length");
 			return -EINVAL;
 		}
 
-		/* Expanded cipher keys */
-		(*keyexp.aes_keyexp_fn)(xform->aead.key.data,
-				sess->cipher.expanded_aes_keys.encode,
-				sess->cipher.expanded_aes_keys.decode);
 		break;
 
 	case RTE_CRYPTO_AEAD_AES_GCM:
@@ -493,26 +495,24 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
 		switch (xform->aead.key.length) {
 		case AES_128_BYTES:
 			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			keyexp.aes_gcm_keyexp_fn =
-					mb_ops->aux.keyexp.aes_gcm_128;
+			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
 			break;
 		case AES_192_BYTES:
 			sess->cipher.key_length_in_bytes = AES_192_BYTES;
-			keyexp.aes_gcm_keyexp_fn =
-					mb_ops->aux.keyexp.aes_gcm_192;
+			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
 			break;
 		case AES_256_BYTES:
 			sess->cipher.key_length_in_bytes = AES_256_BYTES;
-			keyexp.aes_gcm_keyexp_fn =
-					mb_ops->aux.keyexp.aes_gcm_256;
+			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
 			break;
 		default:
 			AESNI_MB_LOG(ERR, "Invalid cipher key length");
 			return -EINVAL;
 		}
 
-		(keyexp.aes_gcm_keyexp_fn)(xform->aead.key.data,
-				&sess->cipher.gcm_key);
 		break;
 
 	default:
@@ -539,7 +539,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
 
 /** Parse crypto xform chain and set private session parameters */
 int
-aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
@@ -598,13 +598,13 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 	/* Default IV length = 0 */
 	sess->iv.length = 0;
 
-	ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
+	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
 	if (ret != 0) {
 		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
 		return ret;
 	}
 
-	ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
 			cipher_xform);
 	if (ret != 0) {
 		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
@@ -612,7 +612,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 	}
 
 	if (aead_xform) {
-		ret = aesni_mb_set_session_aead_parameters(mb_ops, sess,
+		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
 				aead_xform);
 		if (ret != 0) {
 			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
@@ -673,7 +673,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
 
 		sess = (struct aesni_mb_session *)_sess_private_data;
 
-		if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
+		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
 				sess, op->sym->xform) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
 			rte_mempool_put(qp->sess_mp, _sess_private_data);
@@ -989,7 +989,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
 		if (processed_jobs == nb_ops)
 			break;
 
-		job = (*qp->op_fns->job.get_completed_job)(qp->mb_mgr);
+		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
 	}
 
 	return processed_jobs;
@@ -1002,7 +1002,7 @@ flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
 	int processed_ops = 0;
 
 	/* Flush the remaining jobs */
-	JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(qp->mb_mgr);
+	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
 
 	if (job)
 		processed_ops += handle_completed_jobs(qp, job,
@@ -1042,7 +1042,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	uint8_t digest_idx = qp->digest_idx;
 	do {
 		/* Get next free mb job struct from mb manager */
-		job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
+		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
 		if (unlikely(job == NULL)) {
 			/* if no free mb job structs we need to flush mb_mgr */
 			processed_jobs += flush_mb_mgr(qp,
@@ -1052,7 +1052,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 			if (nb_ops == processed_jobs)
 				break;
 
-			job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
+			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
 		}
 
 		/*
@@ -1072,8 +1072,11 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		}
 
 		/* Submit job to multi-buffer for processing */
-		job = (*qp->op_fns->job.submit)(qp->mb_mgr);
-
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		job = IMB_SUBMIT_JOB(qp->mb_mgr);
+#else
+		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
+#endif
 		/*
 		 * If submit returns a processed job then handle it,
 		 * before submitting subsequent jobs
@@ -1105,6 +1108,7 @@ cryptodev_aesni_mb_create(const char *name,
 	struct rte_cryptodev *dev;
 	struct aesni_mb_private *internals;
 	enum aesni_mb_vector_mode vector_mode;
+	MB_MGR *mb_mgr;
 
 	/* Check CPU for support for AES instruction set */
 	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
@@ -1139,18 +1143,26 @@ cryptodev_aesni_mb_create(const char *name,
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_CPU_AESNI;
 
+	mb_mgr = alloc_mb_mgr(0);
+	if (mb_mgr == NULL)
+		return -ENOMEM;
+
 	switch (vector_mode) {
 	case RTE_AESNI_MB_SSE:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(mb_mgr);
 		break;
 	case RTE_AESNI_MB_AVX:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(mb_mgr);
 		break;
 	case RTE_AESNI_MB_AVX2:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(mb_mgr);
 		break;
 	case RTE_AESNI_MB_AVX512:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(mb_mgr);
 		break;
 	default:
 		break;
@@ -1161,6 +1173,7 @@ cryptodev_aesni_mb_create(const char *name,
 
 	internals->vector_mode = vector_mode;
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+	internals->mb_mgr = mb_mgr;
 
 #if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
@@ -1204,6 +1217,7 @@ static int
 cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
 {
 	struct rte_cryptodev *cryptodev;
+	struct aesni_mb_private *internals;
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
@@ -1214,6 +1228,10 @@ cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
 	if (cryptodev == NULL)
 		return -ENODEV;
 
+	internals = cryptodev->data->dev_private;
+
+	free_mb_mgr(internals->mb_mgr);
+
 	return rte_cryptodev_pmd_destroy(cryptodev);
 }
 
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index f3eff2685..bfbd97923 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -509,8 +509,6 @@ aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
 		r = rte_ring_lookup(qp->name);
 		if (r)
 			rte_ring_free(r);
-		if (qp->mb_mgr)
-			free_mb_mgr(qp->mb_mgr);
 		rte_free(qp);
 		dev->data->queue_pairs[qp_id] = NULL;
 	}
@@ -589,13 +587,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		goto qp_setup_cleanup;
 
 
-	qp->mb_mgr = alloc_mb_mgr(0);
-	if (qp->mb_mgr == NULL) {
-		ret = -ENOMEM;
-		goto qp_setup_cleanup;
-	}
-
-	qp->op_fns = &job_ops[internals->vector_mode];
+	qp->mb_mgr = internals->mb_mgr;
 
 	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
 			qp_conf->nb_descriptors, socket_id);
@@ -612,9 +604,6 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
 				"digest_mp_%u_%u", dev->data->dev_id, qp_id);
-
-	/* Initialise multi-buffer manager */
-	(*qp->op_fns->job.init_mgr)(qp->mb_mgr);
 	return 0;
 
 qp_setup_cleanup:
@@ -663,7 +652,7 @@ aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
 		return -ENOMEM;
 	}
 
-	ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
+	ret = aesni_mb_set_session_parameters(internals->mb_mgr,
 			sess_private_data, xform);
 	if (ret != 0) {
 		AESNI_MB_LOG(ERR, "failed configure session parameters");
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index d8021cdaa..1dc2c5bf0 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -5,7 +5,24 @@
 #ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
 #define _RTE_AESNI_MB_PMD_PRIVATE_H_
 
-#include "aesni_mb_ops.h"
+#include <intel-ipsec-mb.h>
+
+/*
+ * The IMB_VERSION_NUM macro was introduced in Multi-buffer version 0.50,
+ * so if the macro is not defined, the version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
+enum aesni_mb_vector_mode {
+	RTE_AESNI_MB_NOT_SUPPORTED = 0,
+	RTE_AESNI_MB_SSE,
+	RTE_AESNI_MB_AVX,
+	RTE_AESNI_MB_AVX2,
+	RTE_AESNI_MB_AVX512
+};
 
 #define CRYPTODEV_NAME_AESNI_MB_PMD	crypto_aesni_mb
 /**< AES-NI Multi buffer PMD device name */
@@ -83,7 +100,7 @@ static const unsigned auth_digest_byte_lengths[] = {
 		[AES_XCBC]	= 16,
 		[AES_CMAC]	= 16,
 		[AES_GMAC]	= 12,
-		[NULL_HASH]		= 0
+		[NULL_HASH]	= 0,
 };
 
 /**
@@ -115,6 +134,8 @@ struct aesni_mb_private {
 	/**< CPU vector instruction set mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+	MB_MGR *mb_mgr;
+	/**< Multi-buffer instance */
 };
 
 /** AESNI Multi buffer queue pair */
@@ -123,8 +144,6 @@ struct aesni_mb_qp {
 	/**< Queue Pair Identifier */
 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	/**< Unique Queue Pair Name */
-	const struct aesni_mb_op_fns *op_fns;
-	/**< Vector mode dependent pointer table of the multi-buffer APIs */
 	MB_MGR *mb_mgr;
 	/**< Multi-buffer instance */
 	struct rte_ring *ingress_queue;
@@ -238,7 +259,7 @@ struct aesni_mb_session {
  *
  */
 extern int
-aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform);
 
-- 
2.13.6

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architecture independent macros
  2018-11-23 14:04 [dpdk-dev] [PATCH] crypto/aesni_mb: use architecture independent macros Fan Zhang
@ 2018-12-11 12:29 ` Fan Zhang
  2018-12-18 10:26   ` Akhil Goyal
                     ` (2 more replies)
  0 siblings, 3 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-11 12:29 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, Lukasz Krakowiak

From: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>

This patch updates the aesni_mb PMD to use the IMB_* architecture-independent
macros, reducing the code size and future maintenance effort.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
---
v2:
- made the PMD compatible with both the new intel-ipsec-mb version 0.52 and older versions
- fixed a bug
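
For reference, a minimal sketch of the compile-time version gate this
revision relies on (assuming only what intel-ipsec-mb itself exports;
IMB_VERSION_NUM exists from Multi-buffer 0.50 onwards):

    #include <intel-ipsec-mb.h>

    /* versions before 0.50 do not define IMB_VERSION_NUM */
    #ifndef IMB_VERSION_NUM
    #define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
    #define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
    #endif

    #if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
    /* IMB_* macro based PMD: rte_aesni_mb_pmd_next.c */
    #else
    /* legacy function-pointer based PMD: rte_aesni_mb_pmd.c */
    #endif

The build system makes the same decision: the Makefile greps
IMB_VERSION_NUM from the installed header and compares it against 0x3400
(0.52.0), while meson reads IMB_VERSION_STR via cc.get_define(), each
selecting which pair of source files to compile.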

 drivers/crypto/aesni_mb/Makefile                   |   24 +-
 drivers/crypto/aesni_mb/meson.build                |   14 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_next.c    | 1237 ++++++++++++++++++++
 .../crypto/aesni_mb/rte_aesni_mb_pmd_ops_next.c    |  681 +++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   52 +-
 5 files changed, 1998 insertions(+), 10 deletions(-)
 create mode 100755 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_next.c
 create mode 100755 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_next.c

diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index 806a95eb8..24630a6ca 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -22,8 +22,26 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
 LDLIBS += -lrte_cryptodev
 LDLIBS += -lrte_bus_vdev
 
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+IMB_HDR = /usr/include/intel-ipsec-mb.h
+
+# Detect library version
+IMB_VERSION = $(shell grep -e "IMB_VERSION_STR" $(IMB_HDR) | cut -d'"' -f2)
+IMB_VERSION_NUM = $(shell grep -e "IMB_VERSION_NUM" $(IMB_HDR) | cut -d' ' -f3)
+
+ifeq ($(IMB_VERSION),)
+	# files for an older version of IMB
+	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
+	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+else
+	ifeq ($(shell expr $(IMB_VERSION_NUM) \>= 0x3400), 1)
+		# files for a new version of IMB
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_next.c
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_next.c
+	else
+		# files for older version of IMB
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+	endif
+endif
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
index aae0995e5..490f68eaf 100644
--- a/drivers/crypto/aesni_mb/meson.build
+++ b/drivers/crypto/aesni_mb/meson.build
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2018 Intel Corporation
-
+IPSec_MB_ver_0_52 = '0.52.0'
 lib = cc.find_library('IPSec_MB', required: false)
 if not lib.found()
 	build = false
@@ -8,5 +8,15 @@ else
 	ext_deps += lib
 endif
 
-sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
+imb_version = cc.get_define('IMB_VERSION_STR',
+        prefix : '#include<intel-ipsec-mb.h>')
+
+if imb_version.version_compare('>=' + IPSec_MB_ver_0_52)
+	message('Build for a new version of library IPSec_MB[' + imb_version + ']')
+	sources = files('rte_aesni_mb_pmd_next.c', 'rte_aesni_mb_pmd_ops_next.c')
+else
+	message('Build for older version of library IPSec_MB[' + imb_version + ']')
+	sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
+endif
+
 deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_next.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_next.c
new file mode 100755
index 000000000..2c25b7b32
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_next.c
@@ -0,0 +1,1237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <intel-ipsec-mb.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+#define AES_CCM_DIGEST_MIN_LEN 4
+#define AES_CCM_DIGEST_MAX_LEN 16
+#define HMAC_MAX_BLOCK_SIZE 128
+static uint8_t cryptodev_driver_id;
+
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
+
+/**
+ * Calculate the authentication pre-computes
+ *
+ * @param one_block_hash	Function pointer to calculate digest on ipad/opad
+ * @param ipad			Inner pad output byte array
+ * @param opad			Outer pad output byte array
+ * @param hkey			Authentication key
+ * @param hkey_len		Authentication key length
+ * @param blocksize		Block size of selected hash algo
+ */
+static void
+calculate_auth_precomputes(hash_one_block_t one_block_hash,
+		uint8_t *ipad, uint8_t *opad,
+		uint8_t *hkey, uint16_t hkey_len,
+		uint16_t blocksize)
+{
+	unsigned i, length;
+
+	uint8_t ipad_buf[blocksize] __rte_aligned(16);
+	uint8_t opad_buf[blocksize] __rte_aligned(16);
+
+	/* Setup inner and outer pads */
+	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
+	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
+
+	/* XOR hash key with inner and outer pads */
+	length = hkey_len > blocksize ? blocksize : hkey_len;
+
+	for (i = 0; i < length; i++) {
+		ipad_buf[i] ^= hkey[i];
+		opad_buf[i] ^= hkey[i];
+	}
+
+	/* Compute partial hashes */
+	(*one_block_hash)(ipad_buf, ipad);
+	(*one_block_hash)(opad_buf, opad);
+
+	/* Clean up stack */
+	memset(ipad_buf, 0, blocksize);
+	memset(opad_buf, 0, blocksize);
+}
+
+/** Get xform chain order */
+static enum aesni_mb_operation
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+	if (xform == NULL)
+		return AESNI_MB_OP_NOT_SUPPORTED;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return AESNI_MB_OP_CIPHER_ONLY;
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return AESNI_MB_OP_CIPHER_HASH;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return AESNI_MB_OP_HASH_ONLY;
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return AESNI_MB_OP_HASH_CIPHER;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
+				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
+			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+				return AESNI_MB_OP_AEAD_CIPHER_HASH;
+			else
+				return AESNI_MB_OP_AEAD_HASH_CIPHER;
+		}
+	}
+
+	return AESNI_MB_OP_NOT_SUPPORTED;
+}
+
+/** Set session authentication parameters */
+static int
+aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	hash_one_block_t hash_oneblock_fn;
+	unsigned int key_larger_block_size = 0;
+	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
+
+	if (xform == NULL) {
+		sess->auth.algo = NULL_HASH;
+		return 0;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
+		return -1;
+	}
+
+	/* Set the request digest size */
+	sess->auth.req_digest_len = xform->auth.digest_length;
+
+	/* Select auth generate/verify */
+	sess->auth.operation = xform->auth.op;
+
+	/* Set Authentication Parameters */
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+		sess->auth.algo = AES_XCBC;
+
+		uint16_t xcbc_mac_digest_len =
+			get_truncated_digest_byte_length(AES_XCBC);
+		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
+			AESNI_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
+				sess->auth.xcbc.k1_expanded,
+				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
+		return 0;
+	}
+
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+		uint32_t dust[4*15];
+
+		sess->auth.algo = AES_CMAC;
+
+		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
+
+		if (sess->auth.req_digest_len > cmac_digest_len) {
+			AESNI_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		/*
+		 * The multi-buffer library supports digest sizes from 4 to 16
+		 * bytes in version 0.50, and sizes of 12 and 16 bytes in
+		 * version 0.49.
+		 * If size requested is different, generate the full digest
+		 * (16 bytes) in a temporary location and then memcpy
+		 * the requested number of bytes.
+		 */
+		if (sess->auth.req_digest_len < 4)
+			sess->auth.gen_digest_len = cmac_digest_len;
+		else
+			sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
+				sess->auth.cmac.expkey, dust);
+		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
+				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+		return 0;
+	}
+
+	switch (xform->auth.algo) {
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = MD5;
+		hash_oneblock_fn = mb_mgr->md5_one_block;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		sess->auth.algo = SHA1;
+		hash_oneblock_fn = mb_mgr->sha1_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
+			IMB_SHA1(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		sess->auth.algo = SHA_224;
+		hash_oneblock_fn = mb_mgr->sha224_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
+			IMB_SHA224(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		sess->auth.algo = SHA_256;
+		hash_oneblock_fn = mb_mgr->sha256_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
+			IMB_SHA256(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		sess->auth.algo = SHA_384;
+		hash_oneblock_fn = mb_mgr->sha384_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
+			IMB_SHA384(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		sess->auth.algo = SHA_512;
+		hash_oneblock_fn = mb_mgr->sha512_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
+			IMB_SHA512(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
+		return -ENOTSUP;
+	}
+	uint16_t trunc_digest_size =
+			get_truncated_digest_byte_length(sess->auth.algo);
+	uint16_t full_digest_size =
+			get_digest_byte_length(sess->auth.algo);
+
+	if (sess->auth.req_digest_len > full_digest_size ||
+			sess->auth.req_digest_len == 0) {
+		AESNI_MB_LOG(ERR, "Invalid digest size\n");
+		return -EINVAL;
+	}
+
+	if (sess->auth.req_digest_len != trunc_digest_size &&
+			sess->auth.req_digest_len != full_digest_size)
+		sess->auth.gen_digest_len = full_digest_size;
+	else
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+	/* Calculate Authentication precomputes */
+	if (key_larger_block_size) {
+		calculate_auth_precomputes(hash_oneblock_fn,
+			sess->auth.pads.inner, sess->auth.pads.outer,
+			hashed_key,
+			xform->auth.key.length,
+			get_auth_algo_blocksize(sess->auth.algo));
+	} else {
+		calculate_auth_precomputes(hash_oneblock_fn,
+			sess->auth.pads.inner, sess->auth.pads.outer,
+			xform->auth.key.data,
+			xform->auth.key.length,
+			get_auth_algo_blocksize(sess->auth.algo));
+	}
+
+	return 0;
+}
+
+/** Set session cipher parameters */
+static int
+aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	uint8_t is_aes = 0;
+	uint8_t is_3DES = 0;
+
+	if (xform == NULL) {
+		sess->cipher.mode = NULL_CIPHER;
+		return 0;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
+		return -EINVAL;
+	}
+
+	/* Select cipher direction */
+	switch (xform->cipher.op) {
+	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+		sess->cipher.direction = ENCRYPT;
+		break;
+	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+		sess->cipher.direction = DECRYPT;
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
+		return -EINVAL;
+	}
+
+	/* Select cipher mode */
+	switch (xform->cipher.algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.mode = CBC;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.mode = CNTR;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+		sess->cipher.mode = DOCSIS_SEC_BPI;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
+		sess->cipher.mode = DES;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+		sess->cipher.mode = DOCSIS_DES;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.mode = DES3;
+		is_3DES = 1;
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
+		return -ENOTSUP;
+	}
+
+	/* Set IV parameters */
+	sess->iv.offset = xform->cipher.iv.offset;
+	sess->iv.length = xform->cipher.iv.length;
+
+	/* Check key length and choose key expansion function for AES */
+	if (is_aes) {
+		switch (xform->cipher.key.length) {
+		case AES_128_BYTES:
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case AES_192_BYTES:
+			sess->cipher.key_length_in_bytes = AES_192_BYTES;
+			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case AES_256_BYTES:
+			sess->cipher.key_length_in_bytes = AES_256_BYTES;
+			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+	} else if (is_3DES) {
+		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
+				sess->cipher.exp_3des_keys.key[1],
+				sess->cipher.exp_3des_keys.key[2]};
+
+		switch (xform->cipher.key.length) {
+		case  24:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			IMB_DES_KEYSCHED(mb_mgr, keys[2],
+					xform->cipher.key.data + 16);
+
+			/* Initialize keys - 24 bytes: [K1-K2-K3] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
+			break;
+		case 16:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+			break;
+		case 8:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+
+			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		sess->cipher.key_length_in_bytes = 24;
+	} else {
+		if (xform->cipher.key.length != 8) {
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+		sess->cipher.key_length_in_bytes = 8;
+
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
+				xform->cipher.key.data);
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
+				xform->cipher.key.data);
+	}
+
+	return 0;
+}
+
+static int
+aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	switch (xform->aead.op) {
+	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
+		sess->cipher.direction = ENCRYPT;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+		break;
+	case RTE_CRYPTO_AEAD_OP_DECRYPT:
+		sess->cipher.direction = DECRYPT;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
+		return -EINVAL;
+	}
+
+	switch (xform->aead.algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		sess->cipher.mode = CCM;
+		sess->auth.algo = AES_CCM;
+
+		/* Check key length and choose key expansion function for AES */
+		switch (xform->aead.key.length) {
+		case AES_128_BYTES:
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		break;
+
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.mode = GCM;
+		sess->auth.algo = AES_GMAC;
+
+		switch (xform->aead.key.length) {
+		case AES_128_BYTES:
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		case AES_192_BYTES:
+			sess->cipher.key_length_in_bytes = AES_192_BYTES;
+			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		case AES_256_BYTES:
+			sess->cipher.key_length_in_bytes = AES_256_BYTES;
+			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		break;
+
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
+		return -ENOTSUP;
+	}
+
+	/* Set IV parameters */
+	sess->iv.offset = xform->aead.iv.offset;
+	sess->iv.length = xform->aead.iv.length;
+
+	sess->auth.req_digest_len = xform->aead.digest_length;
+	/* CCM digests must be between 4 and 16 and an even number */
+	if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
+			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
+			(sess->auth.req_digest_len & 1) == 1) {
+		AESNI_MB_LOG(ERR, "Invalid digest size\n");
+		return -EINVAL;
+	}
+	sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+	return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	int ret;
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	switch (aesni_mb_get_chain_order(xform)) {
+	case AESNI_MB_OP_HASH_CIPHER:
+		sess->chain_order = HASH_CIPHER;
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case AESNI_MB_OP_CIPHER_HASH:
+		sess->chain_order = CIPHER_HASH;
+		auth_xform = xform->next;
+		cipher_xform = xform;
+		break;
+	case AESNI_MB_OP_HASH_ONLY:
+		sess->chain_order = HASH_CIPHER;
+		auth_xform = xform;
+		cipher_xform = NULL;
+		break;
+	case AESNI_MB_OP_CIPHER_ONLY:
+		/*
+		 * The multi-buffer library operates only in two modes,
+		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
+		 * chain order depends on cipher operation: encryption is always
+		 * the first operation and decryption the last one.
+		 */
+		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+			sess->chain_order = CIPHER_HASH;
+		else
+			sess->chain_order = HASH_CIPHER;
+		auth_xform = NULL;
+		cipher_xform = xform;
+		break;
+	case AESNI_MB_OP_AEAD_CIPHER_HASH:
+		sess->chain_order = CIPHER_HASH;
+		sess->aead.aad_len = xform->aead.aad_length;
+		aead_xform = xform;
+		break;
+	case AESNI_MB_OP_AEAD_HASH_CIPHER:
+		sess->chain_order = HASH_CIPHER;
+		sess->aead.aad_len = xform->aead.aad_length;
+		aead_xform = xform;
+		break;
+	case AESNI_MB_OP_NOT_SUPPORTED:
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
+		return -ENOTSUP;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+
+	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
+		return ret;
+	}
+
+	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
+			cipher_xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
+		return ret;
+	}
+
+	if (aead_xform) {
+		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
+				aead_xform);
+		if (ret != 0) {
+			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Burst enqueue: place crypto operations on the ingress queue for processing.
+ *
+ * @param __qp         Queue Pair to process
+ * @param ops          Crypto operations for processing
+ * @param nb_ops       Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct aesni_mb_qp *qp = __qp;
+
+	unsigned int nb_enqueued;
+
+	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	qp->stats.enqueued_count += nb_enqueued;
+
+	return nb_enqueued;
+}
+
+/** Get multi buffer session */
+static inline struct aesni_mb_session *
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
+{
+	struct aesni_mb_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (likely(op->sym->session != NULL))
+			sess = (struct aesni_mb_session *)
+					get_sym_session_private_data(
+					op->sym->session,
+					cryptodev_driver_id);
+	} else {
+		void *_sess = NULL;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+			return NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+			return NULL;
+
+		sess = (struct aesni_mb_session *)_sess_private_data;
+
+		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
+				sess, op->sym->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			sess = NULL;
+		}
+		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_sym_session_private_data(op->sym->session,
+				cryptodev_driver_id, _sess_private_data);
+	}
+
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+	return sess;
+}
+
+/**
+ * Process a crypto operation and complete a JOB_AES_HMAC job structure for
+ * submission to the multi buffer library for processing.
+ *
+ * @param	job		JOB_AES_HMAC structure to fill
+ * @param	qp		queue pair
+ * @param	op		crypto operation to process
+ * @param	digest_idx	index of the next free temporary digest slot
+ *
+ * @return
+ * - 0 on success
+ * - -1 on failure (invalid session or no room in the destination mbuf)
+ */
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+		struct rte_crypto_op *op, uint8_t *digest_idx)
+{
+	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+	struct aesni_mb_session *session;
+	uint16_t m_offset = 0;
+
+	session = get_session(qp, op);
+	if (session == NULL) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		return -1;
+	}
+
+	/* Set crypto operation */
+	job->chain_order = session->chain_order;
+
+	/* Set cipher parameters */
+	job->cipher_direction = session->cipher.direction;
+	job->cipher_mode = session->cipher.mode;
+
+	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
+
+	/* Set authentication parameters */
+	job->hash_alg = session->auth.algo;
+
+	switch (job->hash_alg) {
+	case AES_XCBC:
+		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+		job->u.XCBC._k2 = session->auth.xcbc.k2;
+		job->u.XCBC._k3 = session->auth.xcbc.k3;
+
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_CCM:
+		job->u.CCM.aad = op->sym->aead.aad.data + 18;
+		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_CMAC:
+		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+		job->u.CMAC._skey1 = session->auth.cmac.skey1;
+		job->u.CMAC._skey2 = session->auth.cmac.skey2;
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_GMAC:
+		job->u.GCM.aad = op->sym->aead.aad.data;
+		job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+		job->aes_enc_key_expanded = &session->cipher.gcm_key;
+		job->aes_dec_key_expanded = &session->cipher.gcm_key;
+		break;
+
+	default:
+		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
+		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
+
+		if (job->cipher_mode == DES3) {
+			job->aes_enc_key_expanded =
+				session->cipher.exp_3des_keys.ks_ptr;
+			job->aes_dec_key_expanded =
+				session->cipher.exp_3des_keys.ks_ptr;
+		} else {
+			job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+			job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		}
+	}
+
+	/* Mutable crypto operation parameters */
+	if (op->sym->m_dst) {
+		m_src = m_dst = op->sym->m_dst;
+
+		/* append space for output data to mbuf */
+		char *odata = rte_pktmbuf_append(m_dst,
+				rte_pktmbuf_data_len(op->sym->m_src));
+		if (odata == NULL) {
+			AESNI_MB_LOG(ERR, "failed to allocate space in destination "
+					"mbuf for source data");
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+
+		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+				rte_pktmbuf_data_len(op->sym->m_src));
+	} else {
+		m_dst = m_src;
+		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
+			m_offset = op->sym->aead.data.offset;
+		else
+			m_offset = op->sym->cipher.data.offset;
+	}
+
+	/* Set digest output location */
+	if (job->hash_alg != NULL_HASH &&
+			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		job->auth_tag_output = qp->temp_digests[*digest_idx];
+		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
+	} else {
+		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
+			job->auth_tag_output = op->sym->aead.digest.data;
+		else
+			job->auth_tag_output = op->sym->auth.digest.data;
+
+		if (session->auth.req_digest_len != session->auth.gen_digest_len) {
+			job->auth_tag_output = qp->temp_digests[*digest_idx];
+			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
+		}
+	}
+	/*
+	 * The multi-buffer library currently only supports returning a
+	 * truncated digest length as specified in the relevant IPsec RFCs
+	 */
+
+	/* Set digest length */
+	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
+
+	/* Set IV parameters */
+	job->iv_len_in_bytes = session->iv.length;
+
+	/* Data  Parameter */
+	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+
+	switch (job->hash_alg) {
+	case AES_CCM:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->aead.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
+
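+		/*
+		 * For CCM the first IV byte carries the flags, so the nonce
+		 * handed to the library starts one byte in.
+		 */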
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset + 1);
+		break;
+
+	case AES_GMAC:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->aead.data.offset;
+		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+		job->msg_len_to_hash_in_bytes = job->msg_len_to_cipher_in_bytes;
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
+		break;
+
+	default:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->cipher.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+
+		job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset);
+	}
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return 0;
+}
+
+static inline void
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+		struct aesni_mb_session *sess)
+{
+	/* Verify digest if required */
+	if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC) {
+		if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
+				sess->auth.req_digest_len) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+				sess->auth.req_digest_len) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+}
+
+static inline void
+generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+		struct aesni_mb_session *sess)
+{
+	/* No extra copy needed */
+	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
+		return;
+
+	/*
+	 * This can only happen for HMAC, where the library generates a
+	 * longer digest than requested; copy only the requested bytes.
+	 */
+	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
+			sess->auth.req_digest_len);
+}
+
+/**
+ * Process a completed job and return the crypto operation it processed
+ *
+ * @param qp		Queue Pair to process
+ * @param job	JOB_AES_HMAC job to process
+ *
+ * @return
+ * - Returns processed crypto operation.
+ * - Returns NULL on invalid job
+ */
+static inline struct rte_crypto_op *
+post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+{
+	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+	struct aesni_mb_session *sess = get_sym_session_private_data(
+							op->sym->session,
+							cryptodev_driver_id);
+
+	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
+		switch (job->status) {
+		case STS_COMPLETED:
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+			if (job->hash_alg != NULL_HASH) {
+				if (sess->auth.operation ==
+						RTE_CRYPTO_AUTH_OP_VERIFY)
+					verify_digest(job, op, sess);
+				else
+					generate_digest(job, op, sess);
+			}
+			break;
+		default:
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+	}
+
+	/* Free session if a session-less crypto op */
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct aesni_mb_session));
+		memset(op->sym->session, 0,
+				rte_cryptodev_sym_get_header_session_size());
+		rte_mempool_put(qp->sess_mp, sess);
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	return op;
+}
+
+/**
+ * Process a completed JOB_AES_HMAC job and keep processing jobs until
+ * get_completed_job returns NULL
+ *
+ * @param qp		Queue Pair to process
+ * @param job		JOB_AES_HMAC job
+ *
+ * @return
+ * - Number of processed jobs
+ */
+static unsigned
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_crypto_op *op = NULL;
+	unsigned processed_jobs = 0;
+
+	while (job != NULL) {
+		op = post_process_mb_job(qp, job);
+
+		if (op) {
+			ops[processed_jobs++] = op;
+			qp->stats.dequeued_count++;
+		} else {
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+		if (processed_jobs == nb_ops)
+			break;
+
+		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
+	}
+
+	return processed_jobs;
+}
+
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	int processed_ops = 0;
+
+	/* Flush the remaining jobs */
+	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
+
+	if (job)
+		processed_ops += handle_completed_jobs(qp, job,
+				&ops[processed_ops], nb_ops - processed_ops);
+
+	return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
+{
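+	/*
+	 * Turn the job into a NULL cipher/hash no-op so it still flows
+	 * through the multi-buffer manager and is dequeued with the error
+	 * status already set on the crypto op.
+	 */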
+	job->chain_order = HASH_CIPHER;
+	job->cipher_mode = NULL_CIPHER;
+	job->hash_alg = NULL_HASH;
+	job->cipher_direction = DECRYPT;
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return job;
+}
+
+static uint16_t
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct aesni_mb_qp *qp = queue_pair;
+
+	struct rte_crypto_op *op;
+	JOB_AES_HMAC *job;
+
+	int retval, processed_jobs = 0;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
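+	/* Use a local copy of the digest index; written back after the loop */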
+	uint8_t digest_idx = qp->digest_idx;
+	do {
+		/* Get next free mb job struct from mb manager */
+		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+		if (unlikely(job == NULL)) {
+			/* if no free mb job structs we need to flush mb_mgr */
+			processed_jobs += flush_mb_mgr(qp,
+					&ops[processed_jobs],
+					nb_ops - processed_jobs);
+
+			if (nb_ops == processed_jobs)
+				break;
+
+			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+		}
+
+		/*
+		 * Get next operation to process from ingress queue.
+		 * There is no need to return the job to the MB_MGR
+		 * if there are no more operations to process, since the MB_MGR
+		 * can use that pointer again in subsequent get_next calls.
+		 */
+		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+		if (retval < 0)
+			break;
+
+		retval = set_mb_job_params(job, qp, op, &digest_idx);
+		if (unlikely(retval != 0)) {
+			qp->stats.dequeue_err_count++;
+			set_job_null_op(job, op);
+		}
+
+		/* Submit job to multi-buffer for processing */
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		job = IMB_SUBMIT_JOB(qp->mb_mgr);
+#else
+		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
+#endif
+		/*
+		 * If submit returns a processed job then handle it,
+		 * before submitting subsequent jobs
+		 */
+		if (job)
+			processed_jobs += handle_completed_jobs(qp, job,
+					&ops[processed_jobs],
+					nb_ops - processed_jobs);
+
+	} while (processed_jobs < nb_ops);
+
+	qp->digest_idx = digest_idx;
+
+	if (processed_jobs < 1)
+		processed_jobs += flush_mb_mgr(qp,
+				&ops[processed_jobs],
+				nb_ops - processed_jobs);
+
+	return processed_jobs;
+}
+
+static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_aesni_mb_create(const char *name,
+			struct rte_vdev_device *vdev,
+			struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct aesni_mb_private *internals;
+	enum aesni_mb_vector_mode vector_mode;
+	MB_MGR *mb_mgr;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+		AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+	if (dev == NULL) {
+		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
+		return -ENODEV;
+	}
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+		vector_mode = RTE_AESNI_MB_AVX512;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_MB_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_MB_AVX;
+	else
+		vector_mode = RTE_AESNI_MB_SSE;
+
+	dev->driver_id = cryptodev_driver_id;
+	dev->dev_ops = rte_aesni_mb_pmd_ops;
+
+	/* register rx/tx burst functions for data path */
+	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
+	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_CPU_AESNI;
+
+	mb_mgr = alloc_mb_mgr(0);
+	if (mb_mgr == NULL)
+		return -ENOMEM;
+
+	switch (vector_mode) {
+	case RTE_AESNI_MB_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(mb_mgr);
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+		goto error_exit;
+	}
+
+	/* Set vector instructions mode supported */
+	internals = dev->data->dev_private;
+
+	internals->vector_mode = vector_mode;
+	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+	internals->mb_mgr = mb_mgr;
+
+	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+			imb_get_version_str());
+
+	return 0;
+
+error_exit:
+	if (mb_mgr)
+		free_mb_mgr(mb_mgr);
+
+	rte_cryptodev_pmd_destroy(dev);
+
+	return -1;
+}
+
+static int
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev_pmd_init_params init_params = {
+		"",
+		sizeof(struct aesni_mb_private),
+		rte_socket_id(),
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+	};
+	const char *name, *args;
+	int retval;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	args = rte_vdev_device_args(vdev);
+
+	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+	if (retval) {
+		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
+				args);
+		return -EINVAL;
+	}
+
+	return cryptodev_aesni_mb_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev *cryptodev;
+	struct aesni_mb_private *internals;
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	internals = cryptodev->data->dev_private;
+
+	free_mb_mgr(internals->mb_mgr);
+
+	return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+	.probe = cryptodev_aesni_mb_probe,
+	.remove = cryptodev_aesni_mb_remove
+};
+
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+	"max_nb_queue_pairs=<int> "
+	"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
+		cryptodev_aesni_mb_pmd_drv.driver,
+		cryptodev_driver_id);
+
+RTE_INIT(aesni_mb_init_log)
+{
+	aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");
+}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_next.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_next.c
new file mode 100755
index 000000000..5788e37d1
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_next.c
@@ -0,0 +1,681 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 20,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 28,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 32,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 48,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 16,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
+	{	/* AES DOCSIS BPI */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/*  3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* DES DOCSIS BPI */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 16,
+					.increment = 2
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 46,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 7,
+					.max = 13,
+					.increment = 1
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused struct rte_cryptodev_config *config)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->stats.enqueued_count;
+		stats->dequeued_count += qp->stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+	}
+}
+
+
+/** Get device info */
+static void
+aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct aesni_mb_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = aesni_mb_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		/* No limit on the number of sessions */
+		dev_info->sym.max_nb_sessions = 0;
+	}
+}
+
+/** Release queue pair */
+static int
+aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+	struct rte_ring *r = NULL;
+
+	if (qp != NULL) {
+		r = rte_ring_lookup(qp->name);
+		if (r)
+			rte_ring_free(r);
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct aesni_mb_qp *qp)
+{
+	unsigned n = snprintf(qp->name, sizeof(qp->name),
+			"aesni_mb_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
+		unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	unsigned int n = snprintf(ring_name, sizeof(ring_name), "%s", qp->name);
+
+	if (n >= sizeof(ring_name))
+		return NULL;
+
+	r = rte_ring_lookup(ring_name);
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
+			ring_name);
+			return r;
+		}
+
+		AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
+			ring_name);
+		return NULL;
+	}
+
+	return rte_ring_create(ring_name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id, struct rte_mempool *session_pool)
+{
+	struct aesni_mb_qp *qp = NULL;
+	struct aesni_mb_private *internals = dev->data->dev_private;
+	int ret = -1;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		aesni_mb_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+
+	qp->mb_mgr = alloc_mb_mgr(0);
+	if (qp->mb_mgr == NULL) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
+
+	switch (internals->vector_mode) {
+	case RTE_AESNI_MB_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(qp->mb_mgr);
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
+				internals->vector_mode);
+		goto qp_setup_cleanup;
+	}
+
+	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->ingress_queue == NULL) {
+		ret = -1;
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	memset(&qp->stats, 0, sizeof(qp->stats));
+
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+
+	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+				"digest_mp_%u_%u", dev->data->dev_id, qp_id);
+	return 0;
+
+qp_setup_cleanup:
+	if (qp) {
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+		rte_free(qp);
+	}
+
+	return ret;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni multi-buffer session structure */
+static unsigned
+aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct aesni_mb_session);
+}
+
+/** Configure an aesni multi-buffer session from a crypto xform chain */
+static int
+aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess,
+		struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct aesni_mb_private *internals = dev->data->dev_private;
+	int ret;
+
+	if (unlikely(sess == NULL)) {
+		AESNI_MB_LOG(ERR, "invalid session struct");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		AESNI_MB_LOG(ERR,
+				"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	ret = aesni_mb_set_session_parameters(internals->mb_mgr,
+			sess_private_data, xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sym_session_private_data(sess, dev->driver_id,
+			sess_private_data);
+
+	return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+struct rte_cryptodev_ops aesni_mb_pmd_ops = {
+		.dev_configure		= aesni_mb_pmd_config,
+		.dev_start		= aesni_mb_pmd_start,
+		.dev_stop		= aesni_mb_pmd_stop,
+		.dev_close		= aesni_mb_pmd_close,
+
+		.stats_get		= aesni_mb_pmd_stats_get,
+		.stats_reset		= aesni_mb_pmd_stats_reset,
+
+		.dev_infos_get		= aesni_mb_pmd_info_get,
+
+		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
+		.queue_pair_release	= aesni_mb_pmd_qp_release,
+		.queue_pair_count	= aesni_mb_pmd_qp_count,
+
+		.sym_session_get_size	= aesni_mb_pmd_sym_session_get_size,
+		.sym_session_configure	= aesni_mb_pmd_sym_session_configure,
+		.sym_session_clear	= aesni_mb_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index d8021cdaa..34dd43095 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -5,7 +5,32 @@
 #ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
 #define _RTE_AESNI_MB_PMD_PRIVATE_H_
 
+#include <intel-ipsec-mb.h>
+
+
+/*
+ * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
+ * so if macro is not defined, it means that the version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
+#if IMB_VERSION_NUM < IMB_VERSION(0, 52, 0)
 #include "aesni_mb_ops.h"
+#endif
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
+enum aesni_mb_vector_mode {
+	RTE_AESNI_MB_NOT_SUPPORTED = 0,
+	RTE_AESNI_MB_SSE,
+	RTE_AESNI_MB_AVX,
+	RTE_AESNI_MB_AVX2,
+	RTE_AESNI_MB_AVX512
+};
+#endif
+
 
 #define CRYPTODEV_NAME_AESNI_MB_PMD	crypto_aesni_mb
 /**< AES-NI Multi buffer PMD device name */
@@ -83,7 +108,9 @@ static const unsigned auth_digest_byte_lengths[] = {
 		[AES_XCBC]	= 16,
 		[AES_CMAC]	= 16,
 		[AES_GMAC]	= 12,
-		[NULL_HASH]		= 0
+		[NULL_HASH]	= 0,
 };
 
 /**
@@ -115,6 +142,10 @@ struct aesni_mb_private {
 	/**< CPU vector instruction set mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
+	MB_MGR *mb_mgr;
+	/**< Multi-buffer instance */
+#endif
 };
 
 /** AESNI Multi buffer queue pair */
@@ -122,13 +153,15 @@ struct aesni_mb_qp {
 	uint16_t id;
 	/**< Queue Pair Identifier */
 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	/**< Unique Queue Pair Name */
+#if IMB_VERSION_NUM < IMB_VERSION(0, 52, 0)
 	const struct aesni_mb_op_fns *op_fns;
 	/**< Vector mode dependent pointer table of the multi-buffer APIs */
+#endif
 	MB_MGR *mb_mgr;
 	/**< Multi-buffer instance */
 	struct rte_ring *ingress_queue;
-       /**< Ring for placing operations ready for processing */
+	/**< Ring for placing operations ready for processing */
 	struct rte_mempool *sess_mp;
 	/**< Session Mempool */
 	struct rte_cryptodev_stats stats;
@@ -153,7 +186,9 @@ struct aesni_mb_session {
 	} iv;
 	/**< IV parameters */
 
-	/** Cipher Parameters */
+	/** Cipher Parameters */
+	const struct aesni_mb_op_fns *op_fns;
+	/**< Vector mode dependent pointer table of the multi-buffer APIs */
+
 	struct {
 		/** Cipher direction - encrypt / decrypt */
 		JOB_CIPHER_DIRECTION direction;
@@ -234,14 +269,21 @@ struct aesni_mb_session {
 } __rte_cache_aligned;
 
 
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
 /**
  *
  */
 extern int
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform);
+#else
+extern int
 aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform);
-
+#endif
 
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
-- 
2.13.6


* Re: [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architure independent marcos
  2018-12-11 12:29 ` [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architure independent marcos Fan Zhang
@ 2018-12-18 10:26   ` Akhil Goyal
  2018-12-19 13:08   ` Thomas Monjalon
  2018-12-19 20:16   ` [dpdk-dev] [PATCH v3 0/4] use architecure independent macros Fan Zhang
  2 siblings, 0 replies; 15+ messages in thread
From: Akhil Goyal @ 2018-12-18 10:26 UTC (permalink / raw)
  To: Fan Zhang, dev; +Cc: Lukasz Krakowiak



On 12/11/2018 5:59 PM, Fan Zhang wrote:
> From: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
>
> This patch updates the aesni_mb to use IMB_* arch independent
> macros to reduce the code size and future maintaining effort.
>
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
> ---
> v2:
> - making the PMD compatible with both new intel-ipsec-mb version 0.52 and older
> - fixed a bug
>
>
Applied to dpdk-next-crypto

Thanks


* Re: [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architure independent marcos
  2018-12-11 12:29 ` [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architure independent marcos Fan Zhang
  2018-12-18 10:26   ` Akhil Goyal
@ 2018-12-19 13:08   ` Thomas Monjalon
  2018-12-19 13:48     ` Zhang, Roy Fan
  2018-12-19 20:16   ` [dpdk-dev] [PATCH v3 0/4] use architecure independent macros Fan Zhang
  2 siblings, 1 reply; 15+ messages in thread
From: Thomas Monjalon @ 2018-12-19 13:08 UTC (permalink / raw)
  To: Fan Zhang; +Cc: dev, akhil.goyal, Lukasz Krakowiak

11/12/2018 13:29, Fan Zhang:
> From: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
> 
> This patch updates the aesni_mb to use IMB_* arch independent
> macros to reduce the code size and future maintaining effort.
> 
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
> ---
> v2:
> - making the PMD compatible with both new intel-ipsec-mb version 0.52 and older
> - fixed a bug
> 
>  drivers/crypto/aesni_mb/Makefile                   |   24 +-
>  drivers/crypto/aesni_mb/meson.build                |   14 +-
>  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_next.c    | 1237 ++++++++++++++++++++
>  .../crypto/aesni_mb/rte_aesni_mb_pmd_ops_next.c    |  681 +++++++++++
>  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   52 +-
>  5 files changed, 1998 insertions(+), 10 deletions(-)
>  create mode 100755 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_next.c
>  create mode 100755 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_next.c
> 
> diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
> index 806a95eb8..24630a6ca 100644
> --- a/drivers/crypto/aesni_mb/Makefile
> +++ b/drivers/crypto/aesni_mb/Makefile
> @@ -22,8 +22,26 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
>  LDLIBS += -lrte_cryptodev
>  LDLIBS += -lrte_bus_vdev
>  
> -# library source files
> -SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
> -SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
> +IMB_HDR = /usr/include/intel-ipsec-mb.h
> +
> +# Detect library version
> +IMB_VERSION = $(shell grep -e "IMB_VERSION_STR" $(IMB_HDR) | cut -d'"' -f2)
> +IMB_VERSION_NUM = $(shell grep -e "IMB_VERSION_NUM" $(IMB_HDR) | cut -d' ' -f3)
> +
> +ifeq ($(IMB_VERSION),)
> +	# files for older version of IMB
> +	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
> +	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
> +else
> +	ifeq ($(shell expr $(IMB_VERSION_NUM) \>= 0x3400), 1)
> +		# files for a new version of IMB
> +		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_next.c
> +		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_next.c
> +	else
> +		# files for older version of IMB
> +		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
> +		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
> +	endif
> +endif
>  
>  include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
> index aae0995e5..490f68eaf 100644
> --- a/drivers/crypto/aesni_mb/meson.build
> +++ b/drivers/crypto/aesni_mb/meson.build
> @@ -1,6 +1,6 @@
>  # SPDX-License-Identifier: BSD-3-Clause
>  # Copyright(c) 2018 Intel Corporation
> -
> +IPSec_MB_ver_0_52 = '0.52.0'
>  lib = cc.find_library('IPSec_MB', required: false)
>  if not lib.found()
>  	build = false
> @@ -8,5 +8,15 @@ else
>  	ext_deps += lib
>  endif
>  
> -sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
> +imb_version = cc.get_define('IMB_VERSION_STR',
> +        prefix : '#include<intel-ipsec-mb.h>')
> +
> +if imb_version.version_compare('>=' + IPSec_MB_ver_0_52)
> +	message('Build for a new version of library IPSec_MB[' + imb_version + ']')
> +	sources = files('rte_aesni_mb_pmd_next.c', 'rte_aesni_mb_pmd_ops_next.c')
> +else
> +	message('Build for older version of library IPSec_MB[' + imb_version + ']')
> +	sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
> +endif
> +
>  deps += ['bus_vdev']

I don't know what you are trying to do, but I know it is not explained.
Adding files "*_next.c" looks to be a bad idea.
And worse: it does not compile with meson:
	drivers/crypto/aesni_mb/meson.build:11:0: ERROR:  Could not get define 'IMB_VERSION_STR'

This patch is a total mess which must be explained, tested and split into several patches.
I drop it from the merge to master and update all related AES patches
to "Changes Requested" in patchwork.


* Re: [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architure independent marcos
  2018-12-19 13:08   ` Thomas Monjalon
@ 2018-12-19 13:48     ` Zhang, Roy Fan
  0 siblings, 0 replies; 15+ messages in thread
From: Zhang, Roy Fan @ 2018-12-19 13:48 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: dev, akhil.goyal, Krakowiak, LukaszX, Yigit, Ferruh

Hi Thomas,

Sorry the patch caused the problem.
I have tested the patch against the intel-ipsec-mb 0.50/0.52 libraries with make and did not find a problem.

When switching between versions of intel-ipsec-mb, a "make uninstall" of the old version has to be done first to clear the stale header in /usr.
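
For example (a sketch; the directory names are illustrative):

    cd intel-ipsec-mb-0.50 && sudo make uninstall    # clear the stale header
    cd ../intel-ipsec-mb-0.52 && make && sudo make install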

However I didn't test meson in 0.50, sorry about that.

The purpose of the patch is to adapt the PMD to the newly introduced API in the latest intel-ipsec-mb code.
Using the newly introduced APIs (macros) has the benefit of much easier maintenance across different architectures and massively reduces the code size.
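
As an illustration of the difference (a minimal sketch, contrasting the
pre-0.52 op_fns dispatch table with its 0.52 macro equivalent):

    /* pre-0.52: dispatch through the vector-mode dependent table */
    job = (*qp->op_fns->job.get_next)(qp->mb_mgr);

    /* 0.52+: arch-independent macro, dispatching inside MB_MGR */
    job = IMB_GET_NEXT_JOB(qp->mb_mgr);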

However, using only the new APIs would break compatibility for users of older versions of the library.

Ferruh and I talked and came up with the solution of preparing both old and new code for 19.02, letting the build system select which files to compile based on the detected ipsec-mb version. We also planned a deprecation notice for 19.05 to drop support for older versions of the ipsec-mb library, along with replacing the existing files with the *_next* ones.

Regards,
Fan

> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Wednesday, December 19, 2018 1:09 PM
> To: Zhang, Roy Fan <roy.fan.zhang@intel.com>
> Cc: dev@dpdk.org; akhil.goyal@nxp.com; Krakowiak, LukaszX
> <lukaszx.krakowiak@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architure
> independent marcos
> 
> 11/12/2018 13:29, Fan Zhang:
> > From: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
> >
> > This patch updates the aesni_mb to use IMB_* arch independent macros
> > to reduce the code size and future maintaining effort.
> >
> > Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> > Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
> > ---
> > v2:
> > - making the PMD compatible with both new intel-ipsec-mb version 0.52
> > and older
> > - fixed a bug
> >
> >  drivers/crypto/aesni_mb/Makefile                   |   24 +-
> >  drivers/crypto/aesni_mb/meson.build                |   14 +-
> >  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_next.c    | 1237
> ++++++++++++++++++++
> >  .../crypto/aesni_mb/rte_aesni_mb_pmd_ops_next.c    |  681
> +++++++++++
> >  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   52 +-
> >  5 files changed, 1998 insertions(+), 10 deletions(-)  create mode
> > 100755 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_next.c
> >  create mode 100755
> > drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_next.c
> >
> > diff --git a/drivers/crypto/aesni_mb/Makefile
> > b/drivers/crypto/aesni_mb/Makefile
> > index 806a95eb8..24630a6ca 100644
> > --- a/drivers/crypto/aesni_mb/Makefile
> > +++ b/drivers/crypto/aesni_mb/Makefile
> > @@ -22,8 +22,26 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
> > -lrte_ring  LDLIBS += -lrte_cryptodev  LDLIBS += -lrte_bus_vdev
> >
> > -# library source files
> > -SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
> > -SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) +=
> rte_aesni_mb_pmd_ops.c
> > +IMB_HDR = /usr/include/intel-ipsec-mb.h
> > +
> > +# Detect library version
> > +IMB_VERSION = $(shell grep -e "IMB_VERSION_STR" $(IMB_HDR) | cut
> > +-d'"' -f2) IMB_VERSION_NUM = $(shell grep -e "IMB_VERSION_NUM"
> > +$(IMB_HDR) | cut -d' ' -f3)
> > +
> > +ifeq ($(IMB_VERSION),)
> > +	# files for older version of IMB
> > +	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) +=
> rte_aesni_mb_pmd.c
> > +	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) +=
> rte_aesni_mb_pmd_ops.c
> > +else
> > +	ifeq ($(shell expr $(IMB_VERSION_NUM) \>= 0x3400), 1)
> > +		# files for a new version of IMB
> > +		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) +=
> rte_aesni_mb_pmd_next.c
> > +		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) +=
> rte_aesni_mb_pmd_ops_next.c
> > +	else
> > +		# files for older version of IMB
> > +		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) +=
> rte_aesni_mb_pmd.c
> > +		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) +=
> rte_aesni_mb_pmd_ops.c
> > +	endif
> > +endif
> >
> >  include $(RTE_SDK)/mk/rte.lib.mk
> > diff --git a/drivers/crypto/aesni_mb/meson.build
> > b/drivers/crypto/aesni_mb/meson.build
> > index aae0995e5..490f68eaf 100644
> > --- a/drivers/crypto/aesni_mb/meson.build
> > +++ b/drivers/crypto/aesni_mb/meson.build
> > @@ -1,6 +1,6 @@
> >  # SPDX-License-Identifier: BSD-3-Clause  # Copyright(c) 2018 Intel
> > Corporation
> > -
> > +IPSec_MB_ver_0_52 = '0.52.0'
> >  lib = cc.find_library('IPSec_MB', required: false)  if not
> > lib.found()
> >  	build = false
> > @@ -8,5 +8,15 @@ else
> >  	ext_deps += lib
> >  endif
> >
> > -sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
> > +imb_version = cc.get_define('IMB_VERSION_STR',
> > +        prefix : '#include<intel-ipsec-mb.h>')
> > +
> > +if imb_version.version_compare('>=' + IPSec_MB_ver_0_52)
> > +	message('Build for a new version of library IPSec_MB[' + imb_version
> + ']')
> > +	sources = files('rte_aesni_mb_pmd_next.c',
> > +'rte_aesni_mb_pmd_ops_next.c') else
> > +	message('Build for older version of library IPSec_MB[' + imb_version
> + ']')
> > +	sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
> > +endif
> > +
> >  deps += ['bus_vdev']
> 
> I don't know what you are trying to do, but I know it is not explained.
> Adding files "*_next.c" looks to be a bad idea.
> And worst: it does not compile with meson:
> 	drivers/crypto/aesni_mb/meson.build:11:0: ERROR:  Could not get
> define 'IMB_VERSION_STR'
> 
> This patch is a total mess which must be explained, tested and split in several
> patches.
> I drop it from the merge to master and update all related AES patches to
> "Changes Requested" in patchwork.
> 
> 


* [dpdk-dev] [PATCH v3 0/4] use architecure independent macros
  2018-12-11 12:29 ` [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architure independent marcos Fan Zhang
  2018-12-18 10:26   ` Akhil Goyal
  2018-12-19 13:08   ` Thomas Monjalon
@ 2018-12-19 20:16   ` Fan Zhang
  2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 1/4] crypto/aesni_mb: rename files to compatible Fan Zhang
                       ` (4 more replies)
  2 siblings, 5 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-19 20:16 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, thomas

This patch updates the aesni_mb to use IMB_* arch independent macros to
reduce the code size and future maintenance effort.

In intel-ipsec-mb library 0.52 all supported algorithms now have the IMB_*
arch independent macros enabled. The macros help reduce the application's
code size and remove the burden of maintaining support for different
architectures such as SSE and AVX*.

This patch adds this support into AESNI-MB PMD. Meanwhile, to keep
supporting older versions of the intel-ipsec-mb library, the existing
rte_aesni_mb_pmd*.c are renamed to rte_aesni_mb_pmd*_compat.c and the
build system will check the version number in /usr/include/intel-ipsec-mb.h
and decide which files to compile. For intel-ipsec-mb library 0.52 the
rte_aesni_mb_pmd*.c will be compiled. For older versions
rte_aesni_mb_pmd*_compat.c will be compiled.
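
For example, the detected version can be checked manually with the same
command the Makefile uses (assuming the header is installed in the default
location):

    $ grep -e "IMB_VERSION_STR" /usr/include/intel-ipsec-mb.h | cut -d'"' -f2
    0.52.0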

It is planned to change the minimum supported intel-ipsec-mb version to 0.52
in the DPDK 19.05 release. By then all code intended to support older
versions will be removed, including rte_aesni_mb_pmd*_compat.c.

Acked-by: Damian Nowak <damianx.nowak@intel.com>

v3:
- patch split.
- fixed meson build bug.
- updated commit message.
- updated documentation.

v2:
- making the PMD compatible with both new intel-ipsec-mb version 0.52 and older
- fixed a bug


Fan Zhang (4):
  crypto/aesni_mb: rename files to compatible
  crypto/aesni_mb: use architecture independent macros
  doc: update library support version
  doc: update deprecation notice

 doc/guides/cryptodevs/aesni_mb.rst                 |    4 +-
 doc/guides/rel_notes/deprecation.rst               |    3 +
 drivers/crypto/aesni_mb/Makefile                   |   26 +-
 drivers/crypto/aesni_mb/meson.build                |   18 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         |  190 ++-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_compat.c  | 1239 ++++++++++++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |   86 +-
 .../crypto/aesni_mb/rte_aesni_mb_pmd_ops_compat.c  |  719 ++++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   52 +-
 9 files changed, 2166 insertions(+), 171 deletions(-)
 create mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_compat.c
 create mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_compat.c

-- 
2.13.6


* [dpdk-dev] [PATCH v3 1/4] crypto/aesni_mb: rename files to compatible
  2018-12-19 20:16   ` [dpdk-dev] [PATCH v3 0/4] use architecure independent macros Fan Zhang
@ 2018-12-19 20:16     ` Fan Zhang
  2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 2/4] crypto/aesni_mb: use architecture independent macros Fan Zhang
                       ` (3 subsequent siblings)
  4 siblings, 0 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-19 20:16 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, thomas, Lukasz Krakowiak

This patch renames the rte_aesni_mb_pmd*.c to
rte_aesni_mb_pmd*_compat.c to indicate that those files are only
for compatibility with older versions of the intel-ipsec-mb
library.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
Acked-by: Damian Nowak <damianx.nowak@intel.com>
---
 drivers/crypto/aesni_mb/Makefile                                      | 4 ++--
 drivers/crypto/aesni_mb/meson.build                                   | 2 +-
 .../crypto/aesni_mb/{rte_aesni_mb_pmd.c => rte_aesni_mb_pmd_compat.c} | 0
 .../{rte_aesni_mb_pmd_ops.c => rte_aesni_mb_pmd_ops_compat.c}         | 0
 4 files changed, 3 insertions(+), 3 deletions(-)
 rename drivers/crypto/aesni_mb/{rte_aesni_mb_pmd.c => rte_aesni_mb_pmd_compat.c} (100%)
 rename drivers/crypto/aesni_mb/{rte_aesni_mb_pmd_ops.c => rte_aesni_mb_pmd_ops_compat.c} (100%)

diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index 806a95eb8..5a8671cd4 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -23,7 +23,7 @@ LDLIBS += -lrte_cryptodev
 LDLIBS += -lrte_bus_vdev
 
 # library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_compat.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_compat.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
index aae0995e5..ed68c7f39 100644
--- a/drivers/crypto/aesni_mb/meson.build
+++ b/drivers/crypto/aesni_mb/meson.build
@@ -8,5 +8,5 @@ else
 	ext_deps += lib
 endif
 
-sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
+sources = files('rte_aesni_mb_pmd_compat.c', 'rte_aesni_mb_pmd_ops_compat.c')
 deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_compat.c
similarity index 100%
rename from drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
rename to drivers/crypto/aesni_mb/rte_aesni_mb_pmd_compat.c
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_compat.c
similarity index 100%
rename from drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
rename to drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_compat.c
-- 
2.13.6


* [dpdk-dev] [PATCH v3 2/4] crypto/aesni_mb: use architecture independent macros
  2018-12-19 20:16   ` [dpdk-dev] [PATCH v3 0/4] use architecure independent macros Fan Zhang
  2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 1/4] crypto/aesni_mb: rename files to compatible Fan Zhang
@ 2018-12-19 20:16     ` Fan Zhang
  2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 3/4] doc: update library support version Fan Zhang
                       ` (2 subsequent siblings)
  4 siblings, 0 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-19 20:16 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, thomas, Lukasz Krakowiak

This patch duplicates the original rte_aesni_mb_pmd*.c files and replaces
the function calls provided by the intel-ipsec-mb library with
architecture-independent macros. The build systems are updated to compile
either rte_aesni_mb_pmd*.c or rte_aesni_mb_pmd*_compat.c based on the
installed intel-ipsec-mb version. For intel-ipsec-mb versions older
than 0.52.0, rte_aesni_mb_pmd*_compat.c will be compiled; otherwise
rte_aesni_mb_pmd*.c will be compiled.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
Acked-by: Damian Nowak <damianx.nowak@intel.com>
---
 drivers/crypto/aesni_mb/Makefile                   |   26 +-
 drivers/crypto/aesni_mb/meson.build                |   18 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         | 1237 ++++++++++++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |  681 +++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   52 +-
 5 files changed, 2003 insertions(+), 11 deletions(-)
 create mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
 create mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c

diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index 5a8671cd4..c2bda5838 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2015 Intel Corporation
+# Copyright(c) 2015-2018 Intel Corporation
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
@@ -22,8 +22,26 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
 LDLIBS += -lrte_cryptodev
 LDLIBS += -lrte_bus_vdev
 
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_compat.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_compat.c
+IMB_HDR = /usr/include/intel-ipsec-mb.h
+
+# Detect library version
+IMB_VERSION = $(shell grep -e "IMB_VERSION_STR" $(IMB_HDR) | cut -d'"' -f2)
+IMB_VERSION_NUM = $(shell grep -e "IMB_VERSION_NUM" $(IMB_HDR) | cut -d' ' -f3)
+
+ifeq ($(IMB_VERSION),)
+	# files for older version of IMB
+	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_compat.c
+	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_compat.c
+else
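+	# 0x3400 == IMB_VERSION(0, 52, 0)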
+	ifeq ($(shell expr $(IMB_VERSION_NUM) \>= 0x3400), 1)
+		# files for a new version of IMB
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+	else
+		# files for older version of IMB
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_compat.c
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_compat.c
+	endif
+endif
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
index ed68c7f39..b5a7fa7f7 100644
--- a/drivers/crypto/aesni_mb/meson.build
+++ b/drivers/crypto/aesni_mb/meson.build
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2018 Intel Corporation
-
+IPSec_MB_ver_0_52 = '0.52.0'
 lib = cc.find_library('IPSec_MB', required: false)
 if not lib.found()
 	build = false
@@ -8,5 +8,19 @@ else
 	ext_deps += lib
 endif
 
-sources = files('rte_aesni_mb_pmd_compat.c', 'rte_aesni_mb_pmd_ops_compat.c')
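+# cc.get_define() returns the define with its surrounding quotes
+# (e.g. "0.52.0"); split on '"' and re-join to strip them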
+imb_arr = cc.get_define('IMB_VERSION_STR',
+	prefix : '#include<intel-ipsec-mb.h>').split('"')
+
+imb_ver = ''.join(imb_arr)
+
+if imb_ver.version_compare('>' + IPSec_MB_ver_0_52)
+	message('Build for a new version of library IPSec_MB[' + imb_ver + ']')
+	sources = files('rte_aesni_mb_pmd.c',
+		'rte_aesni_mb_pmd_ops.c')
+else
+	sources = files('rte_aesni_mb_pmd_compat.c',
+		'rte_aesni_mb_pmd_ops_compat.c')
+	message('Build for older version of library IPSec_MB[' + imb_ver + ']')
+endif
+
 deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
new file mode 100644
index 000000000..2c25b7b32
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -0,0 +1,1237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <intel-ipsec-mb.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+#define AES_CCM_DIGEST_MIN_LEN 4
+#define AES_CCM_DIGEST_MAX_LEN 16
+#define HMAC_MAX_BLOCK_SIZE 128
+static uint8_t cryptodev_driver_id;
+
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
+
+/**
+ * Calculate the authentication pre-computes
+ *
+ * @param one_block_hash	Function pointer to calculate digest on ipad/opad
+ * @param ipad			Inner pad output byte array
+ * @param opad			Outer pad output byte array
+ * @param hkey			Authentication key
+ * @param hkey_len		Authentication key length
+ * @param blocksize		Block size of selected hash algo
+ */
+static void
+calculate_auth_precomputes(hash_one_block_t one_block_hash,
+		uint8_t *ipad, uint8_t *opad,
+		uint8_t *hkey, uint16_t hkey_len,
+		uint16_t blocksize)
+{
+	unsigned i, length;
+
+	uint8_t ipad_buf[blocksize] __rte_aligned(16);
+	uint8_t opad_buf[blocksize] __rte_aligned(16);
+
+	/* Setup inner and outer pads */
+	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
+	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
+
+	/* XOR hash key with inner and outer pads */
+	length = hkey_len > blocksize ? blocksize : hkey_len;
+
+	for (i = 0; i < length; i++) {
+		ipad_buf[i] ^= hkey[i];
+		opad_buf[i] ^= hkey[i];
+	}
+
+	/* Compute partial hashes */
+	(*one_block_hash)(ipad_buf, ipad);
+	(*one_block_hash)(opad_buf, opad);
+
+	/* Clean up stack */
+	memset(ipad_buf, 0, blocksize);
+	memset(opad_buf, 0, blocksize);
+}
+
+/** Get xform chain order */
+static enum aesni_mb_operation
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+	if (xform == NULL)
+		return AESNI_MB_OP_NOT_SUPPORTED;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return AESNI_MB_OP_CIPHER_ONLY;
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return AESNI_MB_OP_CIPHER_HASH;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return AESNI_MB_OP_HASH_ONLY;
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return AESNI_MB_OP_HASH_CIPHER;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
+				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
+			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+				return AESNI_MB_OP_AEAD_CIPHER_HASH;
+			else
+				return AESNI_MB_OP_AEAD_HASH_CIPHER;
+		}
+	}
+
+	return AESNI_MB_OP_NOT_SUPPORTED;
+}
+
+/** Set session authentication parameters */
+static int
+aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	hash_one_block_t hash_oneblock_fn;
+	unsigned int key_larger_block_size = 0;
+	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
+
+	if (xform == NULL) {
+		sess->auth.algo = NULL_HASH;
+		return 0;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
+		return -1;
+	}
+
+	/* Set the request digest size */
+	sess->auth.req_digest_len = xform->auth.digest_length;
+
+	/* Select auth generate/verify */
+	sess->auth.operation = xform->auth.op;
+
+	/* Set Authentication Parameters */
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+		sess->auth.algo = AES_XCBC;
+
+		uint16_t xcbc_mac_digest_len =
+			get_truncated_digest_byte_length(AES_XCBC);
+		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
+			AESNI_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
+				sess->auth.xcbc.k1_expanded,
+				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
+		return 0;
+	}
+
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+		uint32_t dust[4*15];
+
+		sess->auth.algo = AES_CMAC;
+
+		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
+
+		if (sess->auth.req_digest_len > cmac_digest_len) {
+			AESNI_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		/*
+		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
+		 * in version 0.50 and sizes of 12 and 16 bytes,
+		 * in version 0.49.
+		 * If size requested is different, generate the full digest
+		 * (16 bytes) in a temporary location and then memcpy
+		 * the requested number of bytes.
+		 */
+		if (sess->auth.req_digest_len < 4)
+			sess->auth.gen_digest_len = cmac_digest_len;
+		else
+			sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
+				sess->auth.cmac.expkey, dust);
+		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
+				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+		return 0;
+	}
+
+	switch (xform->auth.algo) {
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = MD5;
+		hash_oneblock_fn = mb_mgr->md5_one_block;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		sess->auth.algo = SHA1;
+		hash_oneblock_fn = mb_mgr->sha1_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
+			IMB_SHA1(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		sess->auth.algo = SHA_224;
+		hash_oneblock_fn = mb_mgr->sha224_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
+			IMB_SHA224(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		sess->auth.algo = SHA_256;
+		hash_oneblock_fn = mb_mgr->sha256_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
+			IMB_SHA256(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		sess->auth.algo = SHA_384;
+		hash_oneblock_fn = mb_mgr->sha384_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
+			IMB_SHA384(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		sess->auth.algo = SHA_512;
+		hash_oneblock_fn = mb_mgr->sha512_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
+			IMB_SHA512(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
+		return -ENOTSUP;
+	}
+	uint16_t trunc_digest_size =
+			get_truncated_digest_byte_length(sess->auth.algo);
+	uint16_t full_digest_size =
+			get_digest_byte_length(sess->auth.algo);
+
+	if (sess->auth.req_digest_len > full_digest_size ||
+			sess->auth.req_digest_len == 0) {
+		AESNI_MB_LOG(ERR, "Invalid digest size\n");
+		return -EINVAL;
+	}
+
+	if (sess->auth.req_digest_len != trunc_digest_size &&
+			sess->auth.req_digest_len != full_digest_size)
+		sess->auth.gen_digest_len = full_digest_size;
+	else
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+	/* Calculate Authentication precomputes */
+	if (key_larger_block_size) {
+		calculate_auth_precomputes(hash_oneblock_fn,
+			sess->auth.pads.inner, sess->auth.pads.outer,
+			hashed_key,
+			xform->auth.key.length,
+			get_auth_algo_blocksize(sess->auth.algo));
+	} else {
+		calculate_auth_precomputes(hash_oneblock_fn,
+			sess->auth.pads.inner, sess->auth.pads.outer,
+			xform->auth.key.data,
+			xform->auth.key.length,
+			get_auth_algo_blocksize(sess->auth.algo));
+	}
+
+	return 0;
+}
+
+/** Set session cipher parameters */
+static int
+aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	uint8_t is_aes = 0;
+	uint8_t is_3DES = 0;
+
+	if (xform == NULL) {
+		sess->cipher.mode = NULL_CIPHER;
+		return 0;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
+		return -EINVAL;
+	}
+
+	/* Select cipher direction */
+	switch (xform->cipher.op) {
+	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+		sess->cipher.direction = ENCRYPT;
+		break;
+	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+		sess->cipher.direction = DECRYPT;
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
+		return -EINVAL;
+	}
+
+	/* Select cipher mode */
+	switch (xform->cipher.algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.mode = CBC;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.mode = CNTR;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+		sess->cipher.mode = DOCSIS_SEC_BPI;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
+		sess->cipher.mode = DES;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+		sess->cipher.mode = DOCSIS_DES;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.mode = DES3;
+		is_3DES = 1;
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
+		return -ENOTSUP;
+	}
+
+	/* Set IV parameters */
+	sess->iv.offset = xform->cipher.iv.offset;
+	sess->iv.length = xform->cipher.iv.length;
+
+	/* Check key length and choose key expansion function for AES */
+	if (is_aes) {
+		switch (xform->cipher.key.length) {
+		case AES_128_BYTES:
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case AES_192_BYTES:
+			sess->cipher.key_length_in_bytes = AES_192_BYTES;
+			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case AES_256_BYTES:
+			sess->cipher.key_length_in_bytes = AES_256_BYTES;
+			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+	} else if (is_3DES) {
+		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
+				sess->cipher.exp_3des_keys.key[1],
+				sess->cipher.exp_3des_keys.key[2]};
+
+		switch (xform->cipher.key.length) {
+		case  24:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			IMB_DES_KEYSCHED(mb_mgr, keys[2],
+					xform->cipher.key.data + 16);
+
+			/* Initialize keys - 24 bytes: [K1-K2-K3] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
+			break;
+		case 16:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+			break;
+		case 8:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+
+			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		sess->cipher.key_length_in_bytes = 24;
+	} else {
+		if (xform->cipher.key.length != 8) {
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+		sess->cipher.key_length_in_bytes = 8;
+
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
+				xform->cipher.key.data);
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
+				xform->cipher.key.data);
+	}
+
+	return 0;
+}
+
+static int
+aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	switch (xform->aead.op) {
+	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
+		sess->cipher.direction = ENCRYPT;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+		break;
+	case RTE_CRYPTO_AEAD_OP_DECRYPT:
+		sess->cipher.direction = DECRYPT;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
+		return -EINVAL;
+	}
+
+	switch (xform->aead.algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		sess->cipher.mode = CCM;
+		sess->auth.algo = AES_CCM;
+
+		/* Check key length and choose key expansion function for AES */
+		switch (xform->aead.key.length) {
+		case AES_128_BYTES:
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		break;
+
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.mode = GCM;
+		sess->auth.algo = AES_GMAC;
+
+		switch (xform->aead.key.length) {
+		case AES_128_BYTES:
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		case AES_192_BYTES:
+			sess->cipher.key_length_in_bytes = AES_192_BYTES;
+			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		case AES_256_BYTES:
+			sess->cipher.key_length_in_bytes = AES_256_BYTES;
+			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		break;
+
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
+		return -ENOTSUP;
+	}
+
+	/* Set IV parameters */
+	sess->iv.offset = xform->aead.iv.offset;
+	sess->iv.length = xform->aead.iv.length;
+
+	sess->auth.req_digest_len = xform->aead.digest_length;
+	/* CCM digests must be between 4 and 16 and an even number */
+	if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
+			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
+			(sess->auth.req_digest_len & 1) == 1) {
+		AESNI_MB_LOG(ERR, "Invalid digest size\n");
+		return -EINVAL;
+	}
+	sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+	return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	int ret;
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	switch (aesni_mb_get_chain_order(xform)) {
+	case AESNI_MB_OP_HASH_CIPHER:
+		sess->chain_order = HASH_CIPHER;
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case AESNI_MB_OP_CIPHER_HASH:
+		sess->chain_order = CIPHER_HASH;
+		auth_xform = xform->next;
+		cipher_xform = xform;
+		break;
+	case AESNI_MB_OP_HASH_ONLY:
+		sess->chain_order = HASH_CIPHER;
+		auth_xform = xform;
+		cipher_xform = NULL;
+		break;
+	case AESNI_MB_OP_CIPHER_ONLY:
+		/*
+		 * The multi-buffer library operates in only two modes,
+		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
+		 * the chain order depends on the cipher operation:
+		 * encryption is always the first operation and decryption
+		 * the last one.
+		 */
+		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+			sess->chain_order = CIPHER_HASH;
+		else
+			sess->chain_order = HASH_CIPHER;
+		auth_xform = NULL;
+		cipher_xform = xform;
+		break;
+	case AESNI_MB_OP_AEAD_CIPHER_HASH:
+		sess->chain_order = CIPHER_HASH;
+		sess->aead.aad_len = xform->aead.aad_length;
+		aead_xform = xform;
+		break;
+	case AESNI_MB_OP_AEAD_HASH_CIPHER:
+		sess->chain_order = HASH_CIPHER;
+		sess->aead.aad_len = xform->aead.aad_length;
+		aead_xform = xform;
+		break;
+	case AESNI_MB_OP_NOT_SUPPORTED:
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
+		return -ENOTSUP;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+
+	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
+		return ret;
+	}
+
+	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
+			cipher_xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
+		return ret;
+	}
+
+	if (aead_xform) {
+		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
+				aead_xform);
+		if (ret != 0) {
+			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Burst enqueue: place crypto operations on the ingress queue for processing.
+ *
+ * @param __qp         Queue Pair to process
+ * @param ops          Crypto operations for processing
+ * @param nb_ops       Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct aesni_mb_qp *qp = __qp;
+
+	unsigned int nb_enqueued;
+
+	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	qp->stats.enqueued_count += nb_enqueued;
+
+	return nb_enqueued;
+}
+
+/** Get multi buffer session */
+static inline struct aesni_mb_session *
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
+{
+	struct aesni_mb_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (likely(op->sym->session != NULL))
+			sess = (struct aesni_mb_session *)
+					get_sym_session_private_data(
+					op->sym->session,
+					cryptodev_driver_id);
+	} else {
+		void *_sess = NULL;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+			return NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+		sess = (struct aesni_mb_session *)_sess_private_data;
+
+		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
+				sess, op->sym->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			sess = NULL;
+		}
+		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_sym_session_private_data(op->sym->session,
+				cryptodev_driver_id, _sess_private_data);
+	}
+
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+	return sess;
+}
+
+/**
+ * Process a crypto operation and complete a JOB_AES_HMAC job structure for
+ * submission to the multi buffer library for processing.
+ *
+ * @param	job	JOB_AES_HMAC structure to fill
+ * @param	qp	queue pair to process
+ * @param	op	crypto operation to process
+ * @param	digest_idx	index into the queue pair's temporary digests
+ *
+ * @return
+ * - 0 on success, with the job structure ready for submission
+ * - -1 if the job structure could not be completed
+ */
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+		struct rte_crypto_op *op, uint8_t *digest_idx)
+{
+	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+	struct aesni_mb_session *session;
+	uint16_t m_offset = 0;
+
+	session = get_session(qp, op);
+	if (session == NULL) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		return -1;
+	}
+
+	/* Set crypto operation */
+	job->chain_order = session->chain_order;
+
+	/* Set cipher parameters */
+	job->cipher_direction = session->cipher.direction;
+	job->cipher_mode = session->cipher.mode;
+
+	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
+
+	/* Set authentication parameters */
+	job->hash_alg = session->auth.algo;
+
+	switch (job->hash_alg) {
+	case AES_XCBC:
+		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+		job->u.XCBC._k2 = session->auth.xcbc.k2;
+		job->u.XCBC._k3 = session->auth.xcbc.k3;
+
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_CCM:
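+		/*
+		 * Per the DPDK CCM convention, the AAD buffer reserves its
+		 * first 18 bytes for the B0 block, hence the "+ 18" below.
+		 */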
+		job->u.CCM.aad = op->sym->aead.aad.data + 18;
+		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_CMAC:
+		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+		job->u.CMAC._skey1 = session->auth.cmac.skey1;
+		job->u.CMAC._skey2 = session->auth.cmac.skey2;
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_GMAC:
+		job->u.GCM.aad = op->sym->aead.aad.data;
+		job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+		job->aes_enc_key_expanded = &session->cipher.gcm_key;
+		job->aes_dec_key_expanded = &session->cipher.gcm_key;
+		break;
+
+	default:
+		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
+		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
+
+		if (job->cipher_mode == DES3) {
+			job->aes_enc_key_expanded =
+				session->cipher.exp_3des_keys.ks_ptr;
+			job->aes_dec_key_expanded =
+				session->cipher.exp_3des_keys.ks_ptr;
+		} else {
+			job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+			job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		}
+	}
+
+	/* Mutable crypto operation parameters */
+	if (op->sym->m_dst) {
+		m_src = m_dst = op->sym->m_dst;
+
+		/* append space for output data to mbuf */
+		char *odata = rte_pktmbuf_append(m_dst,
+				rte_pktmbuf_data_len(op->sym->m_src));
+		if (odata == NULL) {
+			AESNI_MB_LOG(ERR, "failed to allocate space in destination "
+					"mbuf for source data");
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+
+		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+				rte_pktmbuf_data_len(op->sym->m_src));
+	} else {
+		m_dst = m_src;
+		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
+			m_offset = op->sym->aead.data.offset;
+		else
+			m_offset = op->sym->cipher.data.offset;
+	}
+
+	/* Set digest output location */
+	if (job->hash_alg != NULL_HASH &&
+			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		job->auth_tag_output = qp->temp_digests[*digest_idx];
+		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
+	} else {
+		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
+			job->auth_tag_output = op->sym->aead.digest.data;
+		else
+			job->auth_tag_output = op->sym->auth.digest.data;
+
+		if (session->auth.req_digest_len != session->auth.gen_digest_len) {
+			job->auth_tag_output = qp->temp_digests[*digest_idx];
+			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
+		}
+	}
+	/*
+	 * The multi-buffer library currently only supports returning a
+	 * truncated digest length, as specified in the relevant IPsec RFCs.
+	 */
+
+	/* Set digest length */
+	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
+
+	/* Set IV parameters */
+	job->iv_len_in_bytes = session->iv.length;
+
+	/* Data parameters */
+	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+
+	switch (job->hash_alg) {
+	case AES_CCM:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->aead.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
+
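+		/*
+		 * Byte 0 of the CCM IV field is reserved for the counter
+		 * block flags, so the nonce is read from offset + 1.
+		 */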
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset + 1);
+		break;
+
+	case AES_GMAC:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->aead.data.offset;
+		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+		job->msg_len_to_hash_in_bytes = job->msg_len_to_cipher_in_bytes;
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
+		break;
+
+	default:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->cipher.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+
+		job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset);
+	}
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return 0;
+}
+
+static inline void
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+		struct aesni_mb_session *sess)
+{
+	/* Verify digest if required */
+	if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC) {
+		if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
+				sess->auth.req_digest_len) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+				sess->auth.req_digest_len) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+}
+
+static inline void
+generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+		struct aesni_mb_session *sess)
+{
+	/* No extra copy needed */
+	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
+		return;
+
+	/*
+	 * This can only happen for HMAC, where the full digest is
+	 * generated and only the requested number of bytes is copied out.
+	 */
+	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
+			sess->auth.req_digest_len);
+}
+
+/**
+ * Process a completed job and return the crypto operation it processed
+ *
+ * @param qp	Queue Pair to process
+ * @param job	completed JOB_AES_HMAC job to post-process
+ *
+ * @return
+ * - Returns processed crypto operation.
+ * - Returns NULL on invalid job
+ */
+static inline struct rte_crypto_op *
+post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+{
+	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+	struct aesni_mb_session *sess = get_sym_session_private_data(
+							op->sym->session,
+							cryptodev_driver_id);
+
+	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
+		switch (job->status) {
+		case STS_COMPLETED:
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+			if (job->hash_alg != NULL_HASH) {
+				if (sess->auth.operation ==
+						RTE_CRYPTO_AUTH_OP_VERIFY)
+					verify_digest(job, op, sess);
+				else
+					generate_digest(job, op, sess);
+			}
+			break;
+		default:
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+	}
+
+	/* Free session if a session-less crypto op */
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct aesni_mb_session));
+		memset(op->sym->session, 0,
+				rte_cryptodev_sym_get_header_session_size());
+		rte_mempool_put(qp->sess_mp, sess);
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	return op;
+}
+
+/**
+ * Process a completed JOB_AES_HMAC job and keep processing jobs until
+ * get_completed_job returns NULL
+ *
+ * @param qp		Queue Pair to process
+ * @param job		first completed JOB_AES_HMAC job
+ * @param ops		array in which to return processed crypto operations
+ * @param nb_ops	maximum number of operations to return
+ *
+ * @return
+ * - Number of processed jobs
+ */
+static unsigned
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_crypto_op *op = NULL;
+	unsigned processed_jobs = 0;
+
+	while (job != NULL) {
+		op = post_process_mb_job(qp, job);
+
+		if (op) {
+			ops[processed_jobs++] = op;
+			qp->stats.dequeued_count++;
+		} else {
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+		if (processed_jobs == nb_ops)
+			break;
+
+		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
+	}
+
+	return processed_jobs;
+}
+
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	int processed_ops = 0;
+
+	/* Flush the remaining jobs */
+	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
+
+	if (job)
+		processed_ops += handle_completed_jobs(qp, job,
+				&ops[processed_ops], nb_ops - processed_ops);
+
+	return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
+{
+	job->chain_order = HASH_CIPHER;
+	job->cipher_mode = NULL_CIPHER;
+	job->hash_alg = NULL_HASH;
+	job->cipher_direction = DECRYPT;
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return job;
+}
+
+static uint16_t
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct aesni_mb_qp *qp = queue_pair;
+
+	struct rte_crypto_op *op;
+	JOB_AES_HMAC *job;
+
+	int retval, processed_jobs = 0;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	uint8_t digest_idx = qp->digest_idx;
+	do {
+		/* Get next free mb job struct from mb manager */
+		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+		if (unlikely(job == NULL)) {
+			/* if no free mb job structs we need to flush mb_mgr */
+			processed_jobs += flush_mb_mgr(qp,
+					&ops[processed_jobs],
+					nb_ops - processed_jobs);
+
+			if (nb_ops == processed_jobs)
+				break;
+
+			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+		}
+
+		/*
+		 * Get next operation to process from ingress queue.
+		 * There is no need to return the job to the MB_MGR
+		 * if there are no more operations to process, since the MB_MGR
+		 * can use that pointer again in next get_next calls.
+		 */
+		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+		if (retval < 0)
+			break;
+
+		retval = set_mb_job_params(job, qp, op, &digest_idx);
+		if (unlikely(retval != 0)) {
+			qp->stats.dequeue_err_count++;
+			set_job_null_op(job, op);
+		}
+
+		/* Submit job to multi-buffer for processing */
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		job = IMB_SUBMIT_JOB(qp->mb_mgr);
+#else
+		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
+#endif
+		/*
+		 * If submit returns a processed job then handle it,
+		 * before submitting subsequent jobs
+		 */
+		if (job)
+			processed_jobs += handle_completed_jobs(qp, job,
+					&ops[processed_jobs],
+					nb_ops - processed_jobs);
+
+	} while (processed_jobs < nb_ops);
+
+	qp->digest_idx = digest_idx;
+
+	if (processed_jobs < 1)
+		processed_jobs += flush_mb_mgr(qp,
+				&ops[processed_jobs],
+				nb_ops - processed_jobs);
+
+	return processed_jobs;
+}
+
+static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_aesni_mb_create(const char *name,
+			struct rte_vdev_device *vdev,
+			struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct aesni_mb_private *internals;
+	enum aesni_mb_vector_mode vector_mode;
+	MB_MGR *mb_mgr;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+		AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+	if (dev == NULL) {
+		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
+		return -ENODEV;
+	}
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+		vector_mode = RTE_AESNI_MB_AVX512;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_MB_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_MB_AVX;
+	else
+		vector_mode = RTE_AESNI_MB_SSE;
+
+	dev->driver_id = cryptodev_driver_id;
+	dev->dev_ops = rte_aesni_mb_pmd_ops;
+
+	/* register rx/tx burst functions for data path */
+	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
+	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_CPU_AESNI;
+
+	mb_mgr = alloc_mb_mgr(0);
+	if (mb_mgr == NULL) {
+		rte_cryptodev_pmd_destroy(dev);
+		return -ENOMEM;
+	}
+
+	switch (vector_mode) {
+	case RTE_AESNI_MB_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(mb_mgr);
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+		goto error_exit;
+	}
+
+	/* Set vector instructions mode supported */
+	internals = dev->data->dev_private;
+
+	internals->vector_mode = vector_mode;
+	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+	internals->mb_mgr = mb_mgr;
+
+	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+			imb_get_version_str());
+
+	return 0;
+
+error_exit:
+	if (mb_mgr)
+		free_mb_mgr(mb_mgr);
+
+	rte_cryptodev_pmd_destroy(dev);
+
+	return -1;
+}
+
+static int
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev_pmd_init_params init_params = {
+		"",
+		sizeof(struct aesni_mb_private),
+		rte_socket_id(),
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+	};
+	const char *name, *args;
+	int retval;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	args = rte_vdev_device_args(vdev);
+
+	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+	if (retval) {
+		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
+				args);
+		return -EINVAL;
+	}
+
+	return cryptodev_aesni_mb_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev *cryptodev;
+	struct aesni_mb_private *internals;
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	internals = cryptodev->data->dev_private;
+
+	free_mb_mgr(internals->mb_mgr);
+
+	return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+	.probe = cryptodev_aesni_mb_probe,
+	.remove = cryptodev_aesni_mb_remove
+};
+
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+	"max_nb_queue_pairs=<int> "
+	"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
+		cryptodev_aesni_mb_pmd_drv.driver,
+		cryptodev_driver_id);
+
+RTE_INIT(aesni_mb_init_log)
+{
+	aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");
+}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
new file mode 100644
index 000000000..5788e37d1
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -0,0 +1,681 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 20,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 28,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 32,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 48,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 16,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
+	{	/* AES DOCSIS BPI */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/*  3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* DES DOCSIS BPI */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 16,
+					.increment = 2
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 46,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 7,
+					.max = 13,
+					.increment = 1
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused struct rte_cryptodev_config *config)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->stats.enqueued_count;
+		stats->dequeued_count += qp->stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+	}
+}
+
+
+/** Get device info */
+static void
+aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct aesni_mb_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = aesni_mb_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		/* No limit on the number of sessions */
+		dev_info->sym.max_nb_sessions = 0;
+	}
+}
+
+/** Release queue pair */
+static int
+aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+	struct rte_ring *r = NULL;
+
+	if (qp != NULL) {
+		r = rte_ring_lookup(qp->name);
+		if (r)
+			rte_ring_free(r);
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct aesni_mb_qp *qp)
+{
+	unsigned n = snprintf(qp->name, sizeof(qp->name),
+			"aesni_mb_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
+		unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	unsigned int n = snprintf(ring_name, sizeof(ring_name), "%s", qp->name);
+
+	if (n >= sizeof(ring_name))
+		return NULL;
+
+	r = rte_ring_lookup(ring_name);
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
+			ring_name);
+			return r;
+		}
+
+		AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
+			ring_name);
+		return NULL;
+	}
+
+	return rte_ring_create(ring_name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id, struct rte_mempool *session_pool)
+{
+	struct aesni_mb_qp *qp = NULL;
+	struct aesni_mb_private *internals = dev->data->dev_private;
+	int ret = -1;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		aesni_mb_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+
+	qp->mb_mgr = alloc_mb_mgr(0);
+	if (qp->mb_mgr == NULL) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
+
+	switch (internals->vector_mode) {
+	case RTE_AESNI_MB_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(qp->mb_mgr);
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
+				internals->vector_mode);
+		goto qp_setup_cleanup;
+	}
+
+	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->ingress_queue == NULL) {
+		ret = -1;
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	memset(&qp->stats, 0, sizeof(qp->stats));
+
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+
+	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+				"digest_mp_%u_%u", dev->data->dev_id, qp_id);
+	return 0;
+
+qp_setup_cleanup:
+	if (qp) {
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+		rte_free(qp);
+	}
+
+	return ret;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni multi-buffer session structure */
+static unsigned
+aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct aesni_mb_session);
+}
+
+/** Configure an aesni multi-buffer session from a crypto xform chain */
+static int
+aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess,
+		struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct aesni_mb_private *internals = dev->data->dev_private;
+	int ret;
+
+	if (unlikely(sess == NULL)) {
+		AESNI_MB_LOG(ERR, "invalid session struct");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		AESNI_MB_LOG(ERR,
+				"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	ret = aesni_mb_set_session_parameters(internals->mb_mgr,
+			sess_private_data, xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "failed configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sym_session_private_data(sess, dev->driver_id,
+			sess_private_data);
+
+	return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+struct rte_cryptodev_ops aesni_mb_pmd_ops = {
+		.dev_configure		= aesni_mb_pmd_config,
+		.dev_start		= aesni_mb_pmd_start,
+		.dev_stop		= aesni_mb_pmd_stop,
+		.dev_close		= aesni_mb_pmd_close,
+
+		.stats_get		= aesni_mb_pmd_stats_get,
+		.stats_reset		= aesni_mb_pmd_stats_reset,
+
+		.dev_infos_get		= aesni_mb_pmd_info_get,
+
+		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
+		.queue_pair_release	= aesni_mb_pmd_qp_release,
+		.queue_pair_count	= aesni_mb_pmd_qp_count,
+
+		.sym_session_get_size	= aesni_mb_pmd_sym_session_get_size,
+		.sym_session_configure	= aesni_mb_pmd_sym_session_configure,
+		.sym_session_clear	= aesni_mb_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index d8021cdaa..d61abfe4f 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -5,7 +5,32 @@
 #ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
 #define _RTE_AESNI_MB_PMD_PRIVATE_H_
 
+#include <intel-ipsec-mb.h>
+
+
+/*
+ * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
+ * so if macro is not defined, it means that the version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
+#if IMB_VERSION_NUM < IMB_VERSION(0, 52, 0)
 #include "aesni_mb_ops.h"
+#endif
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
+enum aesni_mb_vector_mode {
+	RTE_AESNI_MB_NOT_SUPPORTED = 0,
+	RTE_AESNI_MB_SSE,
+	RTE_AESNI_MB_AVX,
+	RTE_AESNI_MB_AVX2,
+	RTE_AESNI_MB_AVX512
+};
+#endif
+
 
 #define CRYPTODEV_NAME_AESNI_MB_PMD	crypto_aesni_mb
 /**< AES-NI Multi buffer PMD device name */
@@ -83,7 +108,7 @@ static const unsigned auth_digest_byte_lengths[] = {
 		[AES_XCBC]	= 16,
 		[AES_CMAC]	= 16,
 		[AES_GMAC]	= 12,
-		[NULL_HASH]		= 0
+		[NULL_HASH]	= 0,
 };
 
 /**
@@ -115,6 +142,10 @@ struct aesni_mb_private {
 	/**< CPU vector instruction set mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
+	MB_MGR *mb_mgr;
+	/**< Multi-buffer instance */
+#endif
 };
 
 /** AESNI Multi buffer queue pair */
@@ -122,13 +153,15 @@ struct aesni_mb_qp {
 	uint16_t id;
 	/**< Queue Pair Identifier */
 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	/**< Unique Queue Pair Name */
+#if IMB_VERSION_NUM < IMB_VERSION(0, 52, 0)
 	const struct aesni_mb_op_fns *op_fns;
 	/**< Vector mode dependent pointer table of the multi-buffer APIs */
+#endif
 	MB_MGR *mb_mgr;
 	/**< Multi-buffer instance */
 	struct rte_ring *ingress_queue;
-       /**< Ring for placing operations ready for processing */
+	/**< Ring for placing operations ready for processing */
 	struct rte_mempool *sess_mp;
 	/**< Session Mempool */
 	struct rte_cryptodev_stats stats;
@@ -153,7 +186,10 @@ struct aesni_mb_session {
 	} iv;
 	/**< IV parameters */
 
+	const struct aesni_mb_op_fns *op_fns;
+	/**< Vector mode dependent pointer table of the multi-buffer APIs */
+
 	/** Cipher Parameters */
 	struct {
 		/** Cipher direction - encrypt / decrypt */
 		JOB_CIPHER_DIRECTION direction;
@@ -234,14 +269,21 @@ struct aesni_mb_session {
 } __rte_cache_aligned;
 
 
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
 /**
  *
  */
 extern int
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform);
+#else
+extern int
 aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform);
-
+#endif
 
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
-- 
2.13.6
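
The data path in the diff above follows the multi-buffer job-ring pattern:
take a free job, fill it, submit it, then drain whatever the manager
reports complete. A minimal sketch of that pattern using only the IMB_*
macros (process_completed() is a hypothetical stub standing in for the
PMD's post-processing; error handling omitted):

  #include <intel-ipsec-mb.h>

  /* Hypothetical stand-in for the PMD's post_process_mb_job() */
  static void
  process_completed(JOB_AES_HMAC *job)
  {
          (void)job;      /* the real PMD checks job->status and the digest */
  }

  static void
  submit_one(MB_MGR *mgr, void *op)
  {
          JOB_AES_HMAC *job = IMB_GET_NEXT_JOB(mgr);

          if (job == NULL) {
                  /* No free job structs: flush to reclaim completed ones */
                  while ((job = IMB_FLUSH_JOB(mgr)) != NULL)
                          process_completed(job);
                  job = IMB_GET_NEXT_JOB(mgr);
          }

          job->user_data = op;    /* fill the remaining job fields here */

          /* Submitting may immediately return an already-completed job */
          job = IMB_SUBMIT_JOB(mgr);
          while (job != NULL) {
                  process_completed(job);
                  job = IMB_GET_COMPLETED_JOB(mgr);
          }
  }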

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v3 3/4] doc: update library support version
  2018-12-19 20:16   ` [dpdk-dev] [PATCH v3 0/4] use architecture independent macros Fan Zhang
  2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 1/4] crypto/aesni_mb: rename files to compatible Fan Zhang
  2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 2/4] crypto/aesni_mb: use architecture independent macros Fan Zhang
@ 2018-12-19 20:16     ` Fan Zhang
  2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 4/4] doc: update deprecation notice Fan Zhang
  2018-12-20 11:56     ` [dpdk-dev] [PATCH v4 0/3] use architecture independent macros Fan Zhang
  4 siblings, 0 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-19 20:16 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, thomas

This patch updates the AESNI-MB PMD document with the new intel-ipsec-mb
version number.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 doc/guides/cryptodevs/aesni_mb.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 63e060d75..12532c63e 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -59,8 +59,8 @@ Installation
 To build DPDK with the AESNI_MB_PMD the user is required to download the multi-buffer
 library from `here <https://github.com/01org/intel-ipsec-mb>`_
 and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v0.50, which
-can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.50.zip>`_.
+The latest version of the library supported by this PMD is v0.52, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.52.zip>`_.
 
 .. code-block:: console
 
-- 
2.13.6

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v3 4/4] doc: update deprecation notice
  2018-12-19 20:16   ` [dpdk-dev] [PATCH v3 0/4] use architecture independent macros Fan Zhang
                       ` (2 preceding siblings ...)
  2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 3/4] doc: update library support version Fan Zhang
@ 2018-12-19 20:16     ` Fan Zhang
  2018-12-20 11:56     ` [dpdk-dev] [PATCH v4 0/3] use architecture independent macros Fan Zhang
  4 siblings, 0 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-19 20:16 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, thomas

From 19.05 the AESNI-MB PMD will not support intel-ipsec-mb library
versions older than 0.52.0.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 doc/guides/rel_notes/deprecation.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index ac7fb29a7..0578978d8 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -98,3 +98,6 @@ Deprecation Notices
   - The size and layout of ``rte_cryptodev_qp_conf`` and syntax of
     ``rte_cryptodev_queue_pair_setup`` will change to allow the use of
     two different mempools for crypto and device private sessions.
+
+ * aesni_mb: the minimum supported intel-ipsec-mb library version will be
+   changed from 0.49.0 to 0.52.0.
-- 
2.13.6

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v4 0/3] use architecture independent macros
  2018-12-19 20:16   ` [dpdk-dev] [PATCH v3 0/4] use architecture independent macros Fan Zhang
                       ` (3 preceding siblings ...)
  2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 4/4] doc: update deprecation notice Fan Zhang
@ 2018-12-20 11:56     ` Fan Zhang
  2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 1/3] crypto/aesni_mb: rename files Fan Zhang
                         ` (3 more replies)
  4 siblings, 4 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-20 11:56 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, pablo.de.lara.guarch

This patch updates the aesni_mb PMD to use the IMB_* arch-independent
macros, to reduce the code size and future maintenance effort.

In intel-ipsec-mb library 0.52 all supported algorithms now have the IMB_*
arch-independent macros enabled. The macros help reduce the application's
code size and remove the burden of maintaining support for different
architectures such as SSE and AVX*, etc.
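
For example (illustrative only; the exact call sites in the diff differ),
a dispatch through the PMD's vector mode function pointer table such as:

  job = (*qp->op_fns->job.get_next)(qp->mb_mgr);

becomes a single architecture independent macro call:

  job = IMB_GET_NEXT_JOB(qp->mb_mgr);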

This patch adds this support to the AESNI-MB PMD. Meanwhile, to keep
supporting older versions of the intel-ipsec-mb library, the existing
rte_aesni_mb_pmd*.c files are renamed to rte_aesni_mb_pmd*_compat.c, and
the build checks the version number in /usr/include/intel-ipsec-mb.h to
decide which files to compile. For intel-ipsec-mb library 0.52 the
rte_aesni_mb_pmd*.c files will be compiled; for older versions
rte_aesni_mb_pmd*_compat.c will be compiled.
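
The compile-time side of that check uses intel-ipsec-mb's own version
macros, as in rte_aesni_mb_pmd_private.h above; reduced to a minimal
sketch:

  #include <intel-ipsec-mb.h>

  /* IMB_VERSION_NUM first appeared in 0.50, so treat absence as 0.49.0 */
  #ifndef IMB_VERSION_NUM
  #define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
  #define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
  #endif

  #if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
  /* IMB_* macro based data path (rte_aesni_mb_pmd*.c) */
  #else
  /* legacy per-arch function table path (rte_aesni_mb_pmd*_compat.c) */
  #endif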

It is planned to change the minimum supported intel-ipsec-mb version to
0.52 in the DPDK 19.05 release. By then all code intended to support older
versions will be removed, including rte_aesni_mb_pmd*_compat.c.

Acked-by: Damian Nowak <damianx.nowak@intel.com>

v4:
- combined documentation patches into one.
- updated release note.

v3:
- patch split.
- fixed meson build bug.
- updated commit message.
- updated documentation.

v2:
- making the PMD compatible with both new intel-ipsec-mb version 0.52 and older
- fixed a bug

Fan Zhang (3):
  crypto/aesni_mb: rename files
  crypto/aesni_mb: use architecture independent macros
  doc: update documentation

 doc/guides/cryptodevs/aesni_mb.rst                 |    4 +-
 doc/guides/rel_notes/deprecation.rst               |    3 +
 doc/guides/rel_notes/release_19_02.rst             |    3 +
 drivers/crypto/aesni_mb/Makefile                   |   26 +-
 drivers/crypto/aesni_mb/meson.build                |   18 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         |  190 ++-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_compat.c  | 1239 ++++++++++++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |   86 +-
 .../crypto/aesni_mb/rte_aesni_mb_pmd_ops_compat.c  |  719 ++++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   52 +-
 10 files changed, 2169 insertions(+), 171 deletions(-)
 create mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_compat.c
 create mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_compat.c

-- 
2.13.6

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v4 1/3] crypto/aesni_mb: rename files
  2018-12-20 11:56     ` [dpdk-dev] [PATCH v4 0/3] use architecture independent macros Fan Zhang
@ 2018-12-20 11:56       ` Fan Zhang
  2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 2/3] crypto/aesni_mb: use architecture independent macros Fan Zhang
                         ` (2 subsequent siblings)
  3 siblings, 0 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-20 11:56 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, pablo.de.lara.guarch, Lukasz Krakowiak

This patch renames the rte_aesni_mb_pmd*.c files to
rte_aesni_mb_pmd*_compat.c to indicate that those files are only
for compatibility with older versions of the intel-ipsec-mb
library.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
Acked-by: Damian Nowak <damianx.nowak@intel.com>
---
 drivers/crypto/aesni_mb/Makefile                                      | 4 ++--
 drivers/crypto/aesni_mb/meson.build                                   | 2 +-
 .../crypto/aesni_mb/{rte_aesni_mb_pmd.c => rte_aesni_mb_pmd_compat.c} | 0
 .../{rte_aesni_mb_pmd_ops.c => rte_aesni_mb_pmd_ops_compat.c}         | 0
 4 files changed, 3 insertions(+), 3 deletions(-)
 rename drivers/crypto/aesni_mb/{rte_aesni_mb_pmd.c => rte_aesni_mb_pmd_compat.c} (100%)
 rename drivers/crypto/aesni_mb/{rte_aesni_mb_pmd_ops.c => rte_aesni_mb_pmd_ops_compat.c} (100%)

diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index 806a95eb8..5a8671cd4 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -23,7 +23,7 @@ LDLIBS += -lrte_cryptodev
 LDLIBS += -lrte_bus_vdev
 
 # library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_compat.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_compat.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
index aae0995e5..ed68c7f39 100644
--- a/drivers/crypto/aesni_mb/meson.build
+++ b/drivers/crypto/aesni_mb/meson.build
@@ -8,5 +8,5 @@ else
 	ext_deps += lib
 endif
 
-sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
+sources = files('rte_aesni_mb_pmd_compat.c', 'rte_aesni_mb_pmd_ops_compat.c')
 deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_compat.c
similarity index 100%
rename from drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
rename to drivers/crypto/aesni_mb/rte_aesni_mb_pmd_compat.c
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_compat.c
similarity index 100%
rename from drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
rename to drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops_compat.c
-- 
2.13.6

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v4 2/3] crypto/aesni_mb: use architecture independent macros
  2018-12-20 11:56     ` [dpdk-dev] [PATCH v4 0/3] use architecture independent macros Fan Zhang
  2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 1/3] crypto/aesni_mb: rename files Fan Zhang
@ 2018-12-20 11:56       ` Fan Zhang
  2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 3/3] doc: update documentation Fan Zhang
  2019-01-09 22:09       ` [dpdk-dev] [PATCH v4 0/3] use architecture independent macros De Lara Guarch, Pablo
  3 siblings, 0 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-20 11:56 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, pablo.de.lara.guarch, Lukasz Krakowiak

This patch duplicates the original rte_aesni_mb_pmd*.c files and replaces
the per-architecture function calls into the intel-ipsec-mb library with
architecture-independent IMB_* macros. The build systems are updated to
compile either rte_aesni_mb_pmd*.c or rte_aesni_mb_pmd*_compat.c based on
the installed intel-ipsec-mb version: for versions older than 0.52.0,
rte_aesni_mb_pmd*_compat.c is compiled; otherwise rte_aesni_mb_pmd*.c is
compiled. A before/after sketch of one call site follows the tags below.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
Acked-by: Damian Nowak <damianx.nowak@intel.com>
---
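To make the mechanical change concrete, here is a hedged before/after
sketch of one call site (the AES-XCBC key expansion); the op_fns member
path shown for the compat side is illustrative rather than quoted from the
old file:

/* Before (compat path): dispatch through a vector-mode dependent
 * function pointer table attached to the queue pair. */
(*qp->op_fns->aux.keyexp.aes_xcbc)(xform->auth.key.data,
		sess->auth.xcbc.k1_expanded,
		sess->auth.xcbc.k2, sess->auth.xcbc.k3);

/* After: a single IMB_* macro; the MB_MGR instance resolves the
 * architecture-specific implementation internally. */
IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
		sess->auth.xcbc.k1_expanded,
		sess->auth.xcbc.k2, sess->auth.xcbc.k3);
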
 drivers/crypto/aesni_mb/Makefile                   |   26 +-
 drivers/crypto/aesni_mb/meson.build                |   18 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         | 1237 ++++++++++++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |  681 +++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   52 +-
 5 files changed, 2003 insertions(+), 11 deletions(-)
 create mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
 create mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c

diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index 5a8671cd4..c2bda5838 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2015 Intel Corporation
+# Copyright(c) 2015-2018 Intel Corporation
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
@@ -22,8 +22,26 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
 LDLIBS += -lrte_cryptodev
 LDLIBS += -lrte_bus_vdev
 
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_compat.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_compat.c
+IMB_HDR = /usr/include/intel-ipsec-mb.h
+
+# Detect library version
+IMB_VERSION = $(shell grep -e "IMB_VERSION_STR" $(IMB_HDR) | cut -d'"' -f2)
+IMB_VERSION_NUM = $(shell grep -e "IMB_VERSION_NUM" $(IMB_HDR) | cut -d' ' -f3)
+
+ifeq ($(IMB_VERSION),)
+	# files for older version of IMB
+	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_compat.c
+	SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_compat.c
+else
+	ifeq ($(shell expr $(IMB_VERSION_NUM) \>= 0x3400), 1)
+		# files for a new version of IMB
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+	else
+		# files for older version of IMB
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_compat.c
+		SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops_compat.c
+	endif
+endif
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
index ed68c7f39..f292edcfc 100644
--- a/drivers/crypto/aesni_mb/meson.build
+++ b/drivers/crypto/aesni_mb/meson.build
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2018 Intel Corporation
-
+IPSec_MB_ver_0_52 = '0.52.0'
 lib = cc.find_library('IPSec_MB', required: false)
 if not lib.found()
 	build = false
@@ -8,5 +8,19 @@ else
 	ext_deps += lib
 endif
 
-sources = files('rte_aesni_mb_pmd_compat.c', 'rte_aesni_mb_pmd_ops_compat.c')
+imb_arr = cc.get_define('IMB_VERSION_STR',
+	prefix : '#include<intel-ipsec-mb.h>').split('"')
+
+imb_ver = ''.join(imb_arr)
+
+if imb_ver.version_compare('>=' + IPSec_MB_ver_0_52)
+	message('Build for a new version of library IPSec_MB[' + imb_ver + ']')
+	sources = files('rte_aesni_mb_pmd.c',
+		'rte_aesni_mb_pmd_ops.c')
+else
+	sources = files('rte_aesni_mb_pmd_compat.c',
+		'rte_aesni_mb_pmd_ops_compat.c')
+	message('Build for older version of library IPSec_MB[' + imb_ver + ']')
+endif
+
 deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
new file mode 100644
index 000000000..2c25b7b32
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -0,0 +1,1237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <intel-ipsec-mb.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+#define AES_CCM_DIGEST_MIN_LEN 4
+#define AES_CCM_DIGEST_MAX_LEN 16
+#define HMAC_MAX_BLOCK_SIZE 128
+static uint8_t cryptodev_driver_id;
+
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
+
+/**
+ * Calculate the authentication pre-computes
+ *
+ * @param one_block_hash	Function pointer to calculate digest on ipad/opad
+ * @param ipad			Inner pad output byte array
+ * @param opad			Outer pad output byte array
+ * @param hkey			Authentication key
+ * @param hkey_len		Authentication key length
+ * @param blocksize		Block size of selected hash algo
+ */
+static void
+calculate_auth_precomputes(hash_one_block_t one_block_hash,
+		uint8_t *ipad, uint8_t *opad,
+		uint8_t *hkey, uint16_t hkey_len,
+		uint16_t blocksize)
+{
+	unsigned i, length;
+
+	uint8_t ipad_buf[blocksize] __rte_aligned(16);
+	uint8_t opad_buf[blocksize] __rte_aligned(16);
+
+	/* Setup inner and outer pads */
+	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
+	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
+
+	/* XOR hash key with inner and outer pads */
+	length = hkey_len > blocksize ? blocksize : hkey_len;
+
+	for (i = 0; i < length; i++) {
+		ipad_buf[i] ^= hkey[i];
+		opad_buf[i] ^= hkey[i];
+	}
+
+	/* Compute partial hashes */
+	(*one_block_hash)(ipad_buf, ipad);
+	(*one_block_hash)(opad_buf, opad);
+
+	/* Clean up stack */
+	memset(ipad_buf, 0, blocksize);
+	memset(opad_buf, 0, blocksize);
+}
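
(For context: the pads computed by this helper are the standard HMAC
partial states,

	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))

where K is the block-sized key, so hashing K ^ ipad and K ^ opad once at
session setup means the per-packet code never re-hashes the padded key.)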
+
+/** Get xform chain order */
+static enum aesni_mb_operation
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+	if (xform == NULL)
+		return AESNI_MB_OP_NOT_SUPPORTED;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return AESNI_MB_OP_CIPHER_ONLY;
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return AESNI_MB_OP_CIPHER_HASH;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return AESNI_MB_OP_HASH_ONLY;
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return AESNI_MB_OP_HASH_CIPHER;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
+				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
+			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+				return AESNI_MB_OP_AEAD_CIPHER_HASH;
+			else
+				return AESNI_MB_OP_AEAD_HASH_CIPHER;
+		}
+	}
+
+	return AESNI_MB_OP_NOT_SUPPORTED;
+}
+
+/** Set session authentication parameters */
+static int
+aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	hash_one_block_t hash_oneblock_fn;
+	unsigned int key_larger_block_size = 0;
+	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
+
+	if (xform == NULL) {
+		sess->auth.algo = NULL_HASH;
+		return 0;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
+		return -1;
+	}
+
+	/* Set the request digest size */
+	sess->auth.req_digest_len = xform->auth.digest_length;
+
+	/* Select auth generate/verify */
+	sess->auth.operation = xform->auth.op;
+
+	/* Set Authentication Parameters */
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+		sess->auth.algo = AES_XCBC;
+
+		uint16_t xcbc_mac_digest_len =
+			get_truncated_digest_byte_length(AES_XCBC);
+		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
+			AESNI_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
+				sess->auth.xcbc.k1_expanded,
+				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
+		return 0;
+	}
+
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+		uint32_t dust[4*15];
+
+		sess->auth.algo = AES_CMAC;
+
+		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
+
+		if (sess->auth.req_digest_len > cmac_digest_len) {
+			AESNI_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		/*
+		 * The multi-buffer lib supports digest sizes from 4 to 16 bytes
+		 * in version 0.50, and sizes of 12 and 16 bytes
+		 * in version 0.49.
+		 * If size requested is different, generate the full digest
+		 * (16 bytes) in a temporary location and then memcpy
+		 * the requested number of bytes.
+		 */
+		if (sess->auth.req_digest_len < 4)
+			sess->auth.gen_digest_len = cmac_digest_len;
+		else
+			sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
+				sess->auth.cmac.expkey, dust);
+		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
+				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+		return 0;
+	}
+
+	switch (xform->auth.algo) {
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = MD5;
+		hash_oneblock_fn = mb_mgr->md5_one_block;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		sess->auth.algo = SHA1;
+		hash_oneblock_fn = mb_mgr->sha1_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
+			IMB_SHA1(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		sess->auth.algo = SHA_224;
+		hash_oneblock_fn = mb_mgr->sha224_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
+			IMB_SHA224(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		sess->auth.algo = SHA_256;
+		hash_oneblock_fn = mb_mgr->sha256_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
+			IMB_SHA256(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		sess->auth.algo = SHA_384;
+		hash_oneblock_fn = mb_mgr->sha384_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
+			IMB_SHA384(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		sess->auth.algo = SHA_512;
+		hash_oneblock_fn = mb_mgr->sha512_one_block;
+		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
+			IMB_SHA512(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
+		return -ENOTSUP;
+	}
+	uint16_t trunc_digest_size =
+			get_truncated_digest_byte_length(sess->auth.algo);
+	uint16_t full_digest_size =
+			get_digest_byte_length(sess->auth.algo);
+
+	if (sess->auth.req_digest_len > full_digest_size ||
+			sess->auth.req_digest_len == 0) {
+		AESNI_MB_LOG(ERR, "Invalid digest size\n");
+		return -EINVAL;
+	}
+
+	if (sess->auth.req_digest_len != trunc_digest_size &&
+			sess->auth.req_digest_len != full_digest_size)
+		sess->auth.gen_digest_len = full_digest_size;
+	else
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+	/* Calculate Authentication precomputes */
+	if (key_larger_block_size) {
+		calculate_auth_precomputes(hash_oneblock_fn,
+			sess->auth.pads.inner, sess->auth.pads.outer,
+			hashed_key,
+			xform->auth.key.length,
+			get_auth_algo_blocksize(sess->auth.algo));
+	} else {
+		calculate_auth_precomputes(hash_oneblock_fn,
+			sess->auth.pads.inner, sess->auth.pads.outer,
+			xform->auth.key.data,
+			xform->auth.key.length,
+			get_auth_algo_blocksize(sess->auth.algo));
+	}
+
+	return 0;
+}
+
+/** Set session cipher parameters */
+static int
+aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	uint8_t is_aes = 0;
+	uint8_t is_3DES = 0;
+
+	if (xform == NULL) {
+		sess->cipher.mode = NULL_CIPHER;
+		return 0;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
+		return -EINVAL;
+	}
+
+	/* Select cipher direction */
+	switch (xform->cipher.op) {
+	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+		sess->cipher.direction = ENCRYPT;
+		break;
+	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+		sess->cipher.direction = DECRYPT;
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
+		return -EINVAL;
+	}
+
+	/* Select cipher mode */
+	switch (xform->cipher.algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.mode = CBC;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.mode = CNTR;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+		sess->cipher.mode = DOCSIS_SEC_BPI;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
+		sess->cipher.mode = DES;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+		sess->cipher.mode = DOCSIS_DES;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.mode = DES3;
+		is_3DES = 1;
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
+		return -ENOTSUP;
+	}
+
+	/* Set IV parameters */
+	sess->iv.offset = xform->cipher.iv.offset;
+	sess->iv.length = xform->cipher.iv.length;
+
+	/* Check key length and choose key expansion function for AES */
+	if (is_aes) {
+		switch (xform->cipher.key.length) {
+		case AES_128_BYTES:
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case AES_192_BYTES:
+			sess->cipher.key_length_in_bytes = AES_192_BYTES;
+			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case AES_256_BYTES:
+			sess->cipher.key_length_in_bytes = AES_256_BYTES;
+			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+	} else if (is_3DES) {
+		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
+				sess->cipher.exp_3des_keys.key[1],
+				sess->cipher.exp_3des_keys.key[2]};
+
+		switch (xform->cipher.key.length) {
+		case  24:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			IMB_DES_KEYSCHED(mb_mgr, keys[2],
+					xform->cipher.key.data + 16);
+
+			/* Initialize keys - 24 bytes: [K1-K2-K3] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
+			break;
+		case 16:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+			break;
+		case 8:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+
+			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		sess->cipher.key_length_in_bytes = 24;
+	} else {
+		if (xform->cipher.key.length != 8) {
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+		sess->cipher.key_length_in_bytes = 8;
+
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
+				xform->cipher.key.data);
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
+				xform->cipher.key.data);
+	}
+
+	return 0;
+}
+
+static int
+aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	switch (xform->aead.op) {
+	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
+		sess->cipher.direction = ENCRYPT;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+		break;
+	case RTE_CRYPTO_AEAD_OP_DECRYPT:
+		sess->cipher.direction = DECRYPT;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
+		return -EINVAL;
+	}
+
+	switch (xform->aead.algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		sess->cipher.mode = CCM;
+		sess->auth.algo = AES_CCM;
+
+		/* Check key length and choose key expansion function for AES */
+		switch (xform->aead.key.length) {
+		case AES_128_BYTES:
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		break;
+
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.mode = GCM;
+		sess->auth.algo = AES_GMAC;
+
+		switch (xform->aead.key.length) {
+		case AES_128_BYTES:
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		case AES_192_BYTES:
+			sess->cipher.key_length_in_bytes = AES_192_BYTES;
+			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		case AES_256_BYTES:
+			sess->cipher.key_length_in_bytes = AES_256_BYTES;
+			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		break;
+
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
+		return -ENOTSUP;
+	}
+
+	/* Set IV parameters */
+	sess->iv.offset = xform->aead.iv.offset;
+	sess->iv.length = xform->aead.iv.length;
+
+	sess->auth.req_digest_len = xform->aead.digest_length;
+	/* CCM digests must be between 4 and 16 and an even number */
+	if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
+			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
+			(sess->auth.req_digest_len & 1) == 1) {
+		AESNI_MB_LOG(ERR, "Invalid digest size\n");
+		return -EINVAL;
+	}
+	sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+	return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	int ret;
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	switch (aesni_mb_get_chain_order(xform)) {
+	case AESNI_MB_OP_HASH_CIPHER:
+		sess->chain_order = HASH_CIPHER;
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case AESNI_MB_OP_CIPHER_HASH:
+		sess->chain_order = CIPHER_HASH;
+		auth_xform = xform->next;
+		cipher_xform = xform;
+		break;
+	case AESNI_MB_OP_HASH_ONLY:
+		sess->chain_order = HASH_CIPHER;
+		auth_xform = xform;
+		cipher_xform = NULL;
+		break;
+	case AESNI_MB_OP_CIPHER_ONLY:
+		/*
+		 * The multi buffer library operates in only two modes,
+		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
+		 * chain order depends on cipher operation: encryption is always
+		 * the first operation and decryption the last one.
+		 */
+		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+			sess->chain_order = CIPHER_HASH;
+		else
+			sess->chain_order = HASH_CIPHER;
+		auth_xform = NULL;
+		cipher_xform = xform;
+		break;
+	case AESNI_MB_OP_AEAD_CIPHER_HASH:
+		sess->chain_order = CIPHER_HASH;
+		sess->aead.aad_len = xform->aead.aad_length;
+		aead_xform = xform;
+		break;
+	case AESNI_MB_OP_AEAD_HASH_CIPHER:
+		sess->chain_order = HASH_CIPHER;
+		sess->aead.aad_len = xform->aead.aad_length;
+		aead_xform = xform;
+		break;
+	case AESNI_MB_OP_NOT_SUPPORTED:
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
+		return -ENOTSUP;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+
+	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
+		return ret;
+	}
+
+	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
+			cipher_xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
+		return ret;
+	}
+
+	if (aead_xform) {
+		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
+				aead_xform);
+		if (ret != 0) {
+			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+
+	return 0;
+}
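
(Illustration: the kind of application-side xform chain that the switch
above maps to CIPHER_HASH; the algorithm choices are arbitrary examples
and key, IV and digest setup are elided.)

struct rte_crypto_sym_xform auth_x = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
	.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE,
};
struct rte_crypto_sym_xform cipher_x = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = &auth_x,
	.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
	.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
};
/* cipher -> auth: aesni_mb_get_chain_order(&cipher_x) returns
 * AESNI_MB_OP_CIPHER_HASH, so the session is set to CIPHER_HASH. */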
+
+/**
+ * Burst enqueue: place crypto operations on the ingress queue for processing.
+ *
+ * @param __qp         Queue Pair to process
+ * @param ops          Crypto operations for processing
+ * @param nb_ops       Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct aesni_mb_qp *qp = __qp;
+
+	unsigned int nb_enqueued;
+
+	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	qp->stats.enqueued_count += nb_enqueued;
+
+	return nb_enqueued;
+}
+
+/** Get multi buffer session */
+static inline struct aesni_mb_session *
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
+{
+	struct aesni_mb_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (likely(op->sym->session != NULL))
+			sess = (struct aesni_mb_session *)
+					get_sym_session_private_data(
+					op->sym->session,
+					cryptodev_driver_id);
+	} else {
+		void *_sess = NULL;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+			return NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+			return NULL;
+
+		sess = (struct aesni_mb_session *)_sess_private_data;
+
+		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
+				sess, op->sym->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			sess = NULL;
+		}
+		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_sym_session_private_data(op->sym->session,
+				cryptodev_driver_id, _sess_private_data);
+	}
+
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+	return sess;
+}
+
+/**
+ * Process a crypto operation and complete a JOB_AES_HMAC job structure for
+ * submission to the multi buffer library for processing.
+ *
+ * @param	job	JOB_AES_HMAC structure to fill
+ * @param	qp	queue pair
+ * @param	op	crypto operation to process
+ * @param	digest_idx	index into the queue pair's temporary digests
+ *
+ * @return
+ * - 0 on success, -1 on failure (the op status is set on error)
+ */
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+		struct rte_crypto_op *op, uint8_t *digest_idx)
+{
+	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+	struct aesni_mb_session *session;
+	uint16_t m_offset = 0;
+
+	session = get_session(qp, op);
+	if (session == NULL) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		return -1;
+	}
+
+	/* Set crypto operation */
+	job->chain_order = session->chain_order;
+
+	/* Set cipher parameters */
+	job->cipher_direction = session->cipher.direction;
+	job->cipher_mode = session->cipher.mode;
+
+	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
+
+	/* Set authentication parameters */
+	job->hash_alg = session->auth.algo;
+
+	switch (job->hash_alg) {
+	case AES_XCBC:
+		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+		job->u.XCBC._k2 = session->auth.xcbc.k2;
+		job->u.XCBC._k3 = session->auth.xcbc.k3;
+
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_CCM:
+		job->u.CCM.aad = op->sym->aead.aad.data + 18;
+		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_CMAC:
+		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+		job->u.CMAC._skey1 = session->auth.cmac.skey1;
+		job->u.CMAC._skey2 = session->auth.cmac.skey2;
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_GMAC:
+		job->u.GCM.aad = op->sym->aead.aad.data;
+		job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+		job->aes_enc_key_expanded = &session->cipher.gcm_key;
+		job->aes_dec_key_expanded = &session->cipher.gcm_key;
+		break;
+
+	default:
+		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
+		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
+
+		if (job->cipher_mode == DES3) {
+			job->aes_enc_key_expanded =
+				session->cipher.exp_3des_keys.ks_ptr;
+			job->aes_dec_key_expanded =
+				session->cipher.exp_3des_keys.ks_ptr;
+		} else {
+			job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+			job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		}
+	}
+
+	/* Mutable crypto operation parameters */
+	if (op->sym->m_dst) {
+		m_src = m_dst = op->sym->m_dst;
+
+		/* append space for output data to mbuf */
+		char *odata = rte_pktmbuf_append(m_dst,
+				rte_pktmbuf_data_len(op->sym->m_src));
+		if (odata == NULL) {
+			AESNI_MB_LOG(ERR, "failed to allocate space in destination "
+					"mbuf for source data");
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+
+		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+				rte_pktmbuf_data_len(op->sym->m_src));
+	} else {
+		m_dst = m_src;
+		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
+			m_offset = op->sym->aead.data.offset;
+		else
+			m_offset = op->sym->cipher.data.offset;
+	}
+
+	/* Set digest output location */
+	if (job->hash_alg != NULL_HASH &&
+			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		job->auth_tag_output = qp->temp_digests[*digest_idx];
+		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
+	} else {
+		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
+			job->auth_tag_output = op->sym->aead.digest.data;
+		else
+			job->auth_tag_output = op->sym->auth.digest.data;
+
+		if (session->auth.req_digest_len != session->auth.gen_digest_len) {
+			job->auth_tag_output = qp->temp_digests[*digest_idx];
+			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
+		}
+	}
+	/*
+	 * The multi-buffer library currently only supports returning a
+	 * truncated digest length, as specified in the relevant IPsec RFCs.
+	 */
+
+	/* Set digest length */
+	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
+
+	/* Set IV parameters */
+	job->iv_len_in_bytes = session->iv.length;
+
+	/* Data  Parameter */
+	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+
+	switch (job->hash_alg) {
+	case AES_CCM:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->aead.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
+
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset + 1);
+		break;
+
+	case AES_GMAC:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->aead.data.offset;
+		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+		job->msg_len_to_hash_in_bytes = job->msg_len_to_cipher_in_bytes;
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
+		break;
+
+	default:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->cipher.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+
+		job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset);
+	}
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return 0;
+}
+
+static inline void
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+		struct aesni_mb_session *sess)
+{
+	/* Verify digest if required */
+	if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC) {
+		if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
+				sess->auth.req_digest_len) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+				sess->auth.req_digest_len) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+}
+
+static inline void
+generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+		struct aesni_mb_session *sess)
+{
+	/* No extra copy needed */
+	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
+		return;
+
+	/*
+	 * This can only happen for HMAC, so only digest
+	 * for authentication algos is required
+	 */
+	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
+			sess->auth.req_digest_len);
+}
+
+/**
+ * Process a completed job and return the crypto operation it carried
+ *
+ * @param qp		Queue Pair to process
+ * @param job	JOB_AES_HMAC job to process
+ *
+ * @return
+ * - Returns processed crypto operation.
+ * - Returns NULL on invalid job
+ */
+static inline struct rte_crypto_op *
+post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+{
+	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+	struct aesni_mb_session *sess = get_sym_session_private_data(
+							op->sym->session,
+							cryptodev_driver_id);
+
+	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
+		switch (job->status) {
+		case STS_COMPLETED:
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+			if (job->hash_alg != NULL_HASH) {
+				if (sess->auth.operation ==
+						RTE_CRYPTO_AUTH_OP_VERIFY)
+					verify_digest(job, op, sess);
+				else
+					generate_digest(job, op, sess);
+			}
+			break;
+		default:
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+	}
+
+	/* Free session if a session-less crypto op */
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct aesni_mb_session));
+		memset(op->sym->session, 0,
+				rte_cryptodev_sym_get_header_session_size());
+		rte_mempool_put(qp->sess_mp, sess);
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	return op;
+}
+
+/**
+ * Process a completed JOB_AES_HMAC job and keep processing jobs until
+ * IMB_GET_COMPLETED_JOB returns NULL
+ *
+ * @param qp		Queue Pair to process
+ * @param job		JOB_AES_HMAC job
+ *
+ * @return
+ * - Number of processed jobs
+ */
+static unsigned
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_crypto_op *op = NULL;
+	unsigned processed_jobs = 0;
+
+	while (job != NULL) {
+		op = post_process_mb_job(qp, job);
+
+		if (op) {
+			ops[processed_jobs++] = op;
+			qp->stats.dequeued_count++;
+		} else {
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+		if (processed_jobs == nb_ops)
+			break;
+
+		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
+	}
+
+	return processed_jobs;
+}
+
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	int processed_ops = 0;
+
+	/* Flush the remaining jobs */
+	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
+
+	if (job)
+		processed_ops += handle_completed_jobs(qp, job,
+				&ops[processed_ops], nb_ops - processed_ops);
+
+	return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
+{
+	job->chain_order = HASH_CIPHER;
+	job->cipher_mode = NULL_CIPHER;
+	job->hash_alg = NULL_HASH;
+	job->cipher_direction = DECRYPT;
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return job;
+}
+
+static uint16_t
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct aesni_mb_qp *qp = queue_pair;
+
+	struct rte_crypto_op *op;
+	JOB_AES_HMAC *job;
+
+	int retval, processed_jobs = 0;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	uint8_t digest_idx = qp->digest_idx;
+	do {
+		/* Get next free mb job struct from mb manager */
+		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+		if (unlikely(job == NULL)) {
+			/* if no free mb job structs we need to flush mb_mgr */
+			processed_jobs += flush_mb_mgr(qp,
+					&ops[processed_jobs],
+					nb_ops - processed_jobs);
+
+			if (nb_ops == processed_jobs)
+				break;
+
+			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+		}
+
+		/*
+		 * Get next operation to process from ingress queue.
+		 * There is no need to return the job to the MB_MGR
+		 * if there are no more operations to process, since the MB_MGR
+		 * can reuse that pointer in subsequent get_next calls.
+		 */
+		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+		if (retval < 0)
+			break;
+
+		retval = set_mb_job_params(job, qp, op, &digest_idx);
+		if (unlikely(retval != 0)) {
+			qp->stats.dequeue_err_count++;
+			set_job_null_op(job, op);
+		}
+
+		/* Submit job to multi-buffer for processing */
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		job = IMB_SUBMIT_JOB(qp->mb_mgr);
+#else
+		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
+#endif
+		/*
+		 * If submit returns a processed job then handle it,
+		 * before submitting subsequent jobs
+		 */
+		if (job)
+			processed_jobs += handle_completed_jobs(qp, job,
+					&ops[processed_jobs],
+					nb_ops - processed_jobs);
+
+	} while (processed_jobs < nb_ops);
+
+	qp->digest_idx = digest_idx;
+
+	if (processed_jobs < 1)
+		processed_jobs += flush_mb_mgr(qp,
+				&ops[processed_jobs],
+				nb_ops - processed_jobs);
+
+	return processed_jobs;
+}
+
+static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_aesni_mb_create(const char *name,
+			struct rte_vdev_device *vdev,
+			struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct aesni_mb_private *internals;
+	enum aesni_mb_vector_mode vector_mode;
+	MB_MGR *mb_mgr;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+		AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+	if (dev == NULL) {
+		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
+		return -ENODEV;
+	}
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+		vector_mode = RTE_AESNI_MB_AVX512;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_MB_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_MB_AVX;
+	else
+		vector_mode = RTE_AESNI_MB_SSE;
+
+	dev->driver_id = cryptodev_driver_id;
+	dev->dev_ops = rte_aesni_mb_pmd_ops;
+
+	/* register rx/tx burst functions for data path */
+	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
+	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_CPU_AESNI;
+
+	mb_mgr = alloc_mb_mgr(0);
+	if (mb_mgr == NULL)
+		return -ENOMEM;
+
+	switch (vector_mode) {
+	case RTE_AESNI_MB_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(mb_mgr);
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+		goto error_exit;
+	}
+
+	/* Set vector instructions mode supported */
+	internals = dev->data->dev_private;
+
+	internals->vector_mode = vector_mode;
+	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+	internals->mb_mgr = mb_mgr;
+
+	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+			imb_get_version_str());
+
+	return 0;
+
+error_exit:
+	if (mb_mgr)
+		free_mb_mgr(mb_mgr);
+
+	rte_cryptodev_pmd_destroy(dev);
+
+	return -1;
+}
+
+static int
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev_pmd_init_params init_params = {
+		"",
+		sizeof(struct aesni_mb_private),
+		rte_socket_id(),
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+	};
+	const char *name, *args;
+	int retval;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	args = rte_vdev_device_args(vdev);
+
+	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+	if (retval) {
+		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
+				args);
+		return -EINVAL;
+	}
+
+	return cryptodev_aesni_mb_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev *cryptodev;
+	struct aesni_mb_private *internals;
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	internals = cryptodev->data->dev_private;
+
+	free_mb_mgr(internals->mb_mgr);
+
+	return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+	.probe = cryptodev_aesni_mb_probe,
+	.remove = cryptodev_aesni_mb_remove
+};
+
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+	"max_nb_queue_pairs=<int> "
+	"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
+		cryptodev_aesni_mb_pmd_drv.driver,
+		cryptodev_driver_id);
+
+RTE_INIT(aesni_mb_init_log)
+{
+	aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");
+}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
new file mode 100644
index 000000000..5788e37d1
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -0,0 +1,681 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 20,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 28,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 32,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 48,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 16,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
+	{	/* AES DOCSIS BPI */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/*  3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* DES DOCSIS BPI */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 16,
+					.increment = 2
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 46,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 7,
+					.max = 13,
+					.increment = 1
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused struct rte_cryptodev_config *config)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->stats.enqueued_count;
+		stats->dequeued_count += qp->stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+	}
+}
+
+
+/** Get device info */
+static void
+aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct aesni_mb_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = aesni_mb_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		/* No limit of number of sessions */
+		dev_info->sym.max_nb_sessions = 0;
+	}
+}
+
+/** Release queue pair */
+static int
+aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+	struct rte_ring *r = NULL;
+
+	if (qp != NULL) {
+		r = rte_ring_lookup(qp->name);
+		if (r)
+			rte_ring_free(r);
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct aesni_mb_qp *qp)
+{
+	unsigned n = snprintf(qp->name, sizeof(qp->name),
+			"aesni_mb_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
+		unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	unsigned int n = snprintf(ring_name, sizeof(ring_name), "%s", qp->name);
+
+	if (n >= sizeof(ring_name))
+		return NULL;
+
+	r = rte_ring_lookup(ring_name);
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
+			ring_name);
+			return r;
+		}
+
+		AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
+			ring_name);
+		return NULL;
+	}
+
+	return rte_ring_create(ring_name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id, struct rte_mempool *session_pool)
+{
+	struct aesni_mb_qp *qp = NULL;
+	struct aesni_mb_private *internals = dev->data->dev_private;
+	int ret = -1;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		aesni_mb_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+
+	qp->mb_mgr = alloc_mb_mgr(0);
+	if (qp->mb_mgr == NULL) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
+
+	switch (internals->vector_mode) {
+	case RTE_AESNI_MB_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(qp->mb_mgr);
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
+				internals->vector_mode);
+		goto qp_setup_cleanup;
+	}
+
+	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->ingress_queue == NULL) {
+		ret = -1;
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	memset(&qp->stats, 0, sizeof(qp->stats));
+
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+
+	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+				"digest_mp_%u_%u", dev->data->dev_id, qp_id);
+	return 0;
+
+qp_setup_cleanup:
+	if (qp) {
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+		rte_free(qp);
+	}
+
+	return ret;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni multi-buffer session structure */
+static unsigned
+aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct aesni_mb_session);
+}
+
+/** Configure an aesni multi-buffer session from a crypto xform chain */
+static int
+aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess,
+		struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct aesni_mb_private *internals = dev->data->dev_private;
+	int ret;
+
+	if (unlikely(sess == NULL)) {
+		AESNI_MB_LOG(ERR, "invalid session struct");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		AESNI_MB_LOG(ERR,
+				"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	ret = aesni_mb_set_session_parameters(internals->mb_mgr,
+			sess_private_data, xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "failed configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sym_session_private_data(sess, dev->driver_id,
+			sess_private_data);
+
+	return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+struct rte_cryptodev_ops aesni_mb_pmd_ops = {
+		.dev_configure		= aesni_mb_pmd_config,
+		.dev_start		= aesni_mb_pmd_start,
+		.dev_stop		= aesni_mb_pmd_stop,
+		.dev_close		= aesni_mb_pmd_close,
+
+		.stats_get		= aesni_mb_pmd_stats_get,
+		.stats_reset		= aesni_mb_pmd_stats_reset,
+
+		.dev_infos_get		= aesni_mb_pmd_info_get,
+
+		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
+		.queue_pair_release	= aesni_mb_pmd_qp_release,
+		.queue_pair_count	= aesni_mb_pmd_qp_count,
+
+		.sym_session_get_size	= aesni_mb_pmd_sym_session_get_size,
+		.sym_session_configure	= aesni_mb_pmd_sym_session_configure,
+		.sym_session_clear	= aesni_mb_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index d8021cdaa..d61abfe4f 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -5,7 +5,32 @@
 #ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
 #define _RTE_AESNI_MB_PMD_PRIVATE_H_
 
+#include <intel-ipsec-mb.h>
+
+
+/*
+ * The IMB_VERSION_NUM macro was introduced in Multi-buffer library 0.50,
+ * so if it is not defined, the library version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
+#if IMB_VERSION_NUM < IMB_VERSION(0, 52, 0)
 #include "aesni_mb_ops.h"
+#endif
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
+enum aesni_mb_vector_mode {
+	RTE_AESNI_MB_NOT_SUPPORTED = 0,
+	RTE_AESNI_MB_SSE,
+	RTE_AESNI_MB_AVX,
+	RTE_AESNI_MB_AVX2,
+	RTE_AESNI_MB_AVX512
+};
+#endif
+
 
 #define CRYPTODEV_NAME_AESNI_MB_PMD	crypto_aesni_mb
 /**< AES-NI Multi buffer PMD device name */
@@ -83,7 +108,9 @@ static const unsigned auth_digest_byte_lengths[] = {
 		[AES_XCBC]	= 16,
 		[AES_CMAC]	= 16,
 		[AES_GMAC]	= 12,
-		[NULL_HASH]		= 0
+		[NULL_HASH]	= 0,
 };
 
 /**
@@ -115,6 +142,10 @@ struct aesni_mb_private {
 	/**< CPU vector instruction set mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
+	MB_MGR *mb_mgr;
+	/**< Multi-buffer instance */
+#endif
 };
 
 /** AESNI Multi buffer queue pair */
@@ -122,13 +153,15 @@ struct aesni_mb_qp {
 	uint16_t id;
 	/**< Queue Pair Identifier */
 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+#if IMB_VERSION_NUM < IMB_VERSION(0, 52, 0)
 	/**< Unique Queue Pair Name */
 	const struct aesni_mb_op_fns *op_fns;
-	/**< Vector mode dependent pointer table of the multi-buffer APIs */
+#endif
+	/**< Unique Queue Pair Name */
 	MB_MGR *mb_mgr;
 	/**< Multi-buffer instance */
 	struct rte_ring *ingress_queue;
-       /**< Ring for placing operations ready for processing */
+	/**< Ring for placing operations ready for processing */
 	struct rte_mempool *sess_mp;
 	/**< Session Mempool */
 	struct rte_cryptodev_stats stats;
@@ -153,7 +186,9 @@ struct aesni_mb_session {
 	} iv;
 	/**< IV parameters */
 
-	/** Cipher Parameters */
+	const struct aesni_mb_op_fns *op_fns;
+	/**< Vector mode dependent pointer table of the multi-buffer APIs */
+	/** Cipher Parameters */
 	struct {
 		/** Cipher direction - encrypt / decrypt */
 		JOB_CIPHER_DIRECTION direction;
@@ -234,14 +269,21 @@ struct aesni_mb_session {
 } __rte_cache_aligned;
 
 
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 52, 0)
 /**
  *
  */
 extern int
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform);
+#else
+extern int
 aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform);
-
+#endif
 
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
-- 
2.13.6
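
For readers skimming the diff above, the vector-mode switch in
aesni_mb_pmd_qp_setup() reduces to the pattern below. This is an
illustrative sketch only, not code from the patch: it assumes
intel-ipsec-mb >= 0.52 for the init_mb_mgr_*() entry points and uses
DPDK's rte_cpuflags API to detect the widest usable instruction set.

    #include <errno.h>
    #include <intel-ipsec-mb.h>
    #include <rte_cpuflags.h>

    /* Initialise a multi-buffer manager for the widest vector ISA the
     * CPU supports, mirroring the switch in the qp_setup code above. */
    static int
    mb_mgr_init_for_cpu(MB_MGR *mgr)
    {
            if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
                    init_mb_mgr_avx512(mgr);
            else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                    init_mb_mgr_avx2(mgr);
            else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
                    init_mb_mgr_avx(mgr);
            else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
                    init_mb_mgr_sse(mgr);
            else
                    return -ENOTSUP; /* no supported vector mode */
            return 0;
    }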


* [dpdk-dev] [PATCH v4 3/3] doc: update documentation
  2018-12-20 11:56     ` [dpdk-dev] [PATCH v4 0/3] use architecure independent macros Fan Zhang
  2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 1/3] crypto/aesni_mb: rename files Fan Zhang
  2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 2/3] crypto/aesni_mb: use architecture independent macros Fan Zhang
@ 2018-12-20 11:56       ` Fan Zhang
  2019-01-09 22:09       ` [dpdk-dev] [PATCH v4 0/3] use architecure independent macros De Lara Guarch, Pablo
  3 siblings, 0 replies; 15+ messages in thread
From: Fan Zhang @ 2018-12-20 11:56 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, pablo.de.lara.guarch

This patch updates the AESNI-MB PMD documentation with the new
intel-ipsec-mb version number, updates the release notes with the newly
supported library version, and adds a deprecation notice about removing
support for older library versions in the 19.05 release.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 doc/guides/cryptodevs/aesni_mb.rst     | 4 ++--
 doc/guides/rel_notes/deprecation.rst   | 3 +++
 doc/guides/rel_notes/release_19_02.rst | 3 +++
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 63e060d75..12532c63e 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -59,8 +59,8 @@ Installation
 To build DPDK with the AESNI_MB_PMD the user is required to download the multi-buffer
 library from `here <https://github.com/01org/intel-ipsec-mb>`_
 and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v0.50, which
-can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.50.zip>`_.
+The latest version of the library supported by this PMD is v0.52, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.52.zip>`_.
 
 .. code-block:: console
 
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index ac7fb29a7..0578978d8 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -98,3 +98,6 @@ Deprecation Notices
   - The size and layout of ``rte_cryptodev_qp_conf`` and syntax of
     ``rte_cryptodev_queue_pair_setup`` will change to allow the use of
     two different mempools for crypto and device private sessions.
+
+ * aesni_mb: the minimum supported intel-ipsec-mb library version will be
+   changed from 0.49.0 to 0.52.0.
diff --git a/doc/guides/rel_notes/release_19_02.rst b/doc/guides/rel_notes/release_19_02.rst
index 069f429a7..161974c77 100644
--- a/doc/guides/rel_notes/release_19_02.rst
+++ b/doc/guides/rel_notes/release_19_02.rst
@@ -65,6 +65,9 @@ New Features
   Added a new performance test tool to test the compressdev PMD. The tool tests
   compression ratio and compression throughput.
 
+* **Updated the AESNI-MB PMD.**
+
+  * Added support for intel-ipsec-mb version 0.52.
 
 Removed Items
 -------------
-- 
2.13.6


* Re: [dpdk-dev] [PATCH v4 0/3] use architecure independent macros
  2018-12-20 11:56     ` [dpdk-dev] [PATCH v4 0/3] use architecure independent macros Fan Zhang
                         ` (2 preceding siblings ...)
  2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 3/3] doc: update documentation Fan Zhang
@ 2019-01-09 22:09       ` De Lara Guarch, Pablo
  3 siblings, 0 replies; 15+ messages in thread
From: De Lara Guarch, Pablo @ 2019-01-09 22:09 UTC (permalink / raw)
  To: Zhang, Roy Fan, dev; +Cc: akhil.goyal



> -----Original Message-----
> From: Zhang, Roy Fan
> Sent: Thursday, December 20, 2018 11:57 AM
> To: dev@dpdk.org
> Cc: akhil.goyal@nxp.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH v4 0/3] use architecure independent macros
> 
> This patch updates the aesni_mb PMD to use IMB_* arch-independent
> macros to reduce the code size and future maintenance effort.
> 
> In intel-ipsec-mb library 0.52 all supported algorithms now have the
> IMB_* arch-independent macros enabled. The macros help reduce the
> application's code size and remove the burden of maintaining support
> for different architectures such as SSE and AVX*.
> 
> This patch adds this support to the AESNI-MB PMD. Meanwhile, to keep
> supporting older versions of the intel-ipsec-mb library, the existing
> rte_aesni_mb_pmd*.c files are renamed to rte_aesni_mb_pmd*_compat.c,
> and the build checks the version number in
> /usr/include/intel-ipsec-mb.h to decide which files to compile. For
> intel-ipsec-mb library 0.52, rte_aesni_mb_pmd*.c will be compiled. For
> older versions, rte_aesni_mb_pmd*_compat.c will be compiled.
> 
> It is planned to change the minimum supported intel-ipsec-mb version to
> 0.52 in the DPDK 19.05 release. By then, all code intended to support
> older versions will be removed, including rte_aesni_mb_pmd*_compat.c.
> 
> Acked-by: Damian Nowak <damianx.nowak@intel.com>

Series applied to dpdk-next-crypto.

Thanks,
Pablo
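
To make the IMB_* dispatch described in the cover letter above concrete,
here is a minimal sketch (not code from the patch; job setup is elided,
and the type and macro names are those of intel-ipsec-mb 0.52). The
IMB_* macros route through per-ISA function pointers held inside the
MB_MGR, so a single code path serves SSE, AVX, AVX2 and AVX512:

    #include <intel-ipsec-mb.h>

    /* Submit one job and reap a completed one, arch-independently. */
    static JOB_AES_HMAC *
    submit_and_reap(MB_MGR *mgr)
    {
            JOB_AES_HMAC *job = IMB_GET_NEXT_JOB(mgr);

            /* ... fill in cipher/hash modes, keys, src/dst, lengths ... */

            job = IMB_SUBMIT_JOB(mgr);
            if (job == NULL)
                    /* scheduler has not accumulated enough jobs yet */
                    job = IMB_FLUSH_JOB(mgr);
            return job;
    }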


Thread overview: 15+ messages
2018-11-23 14:04 [dpdk-dev] [PATCH] crypto/aesni_mb: use of archtecture independent macros Fan Zhang
2018-12-11 12:29 ` [dpdk-dev] [PATCH v2] crypto/aesni_mb: use architure independent marcos Fan Zhang
2018-12-18 10:26   ` Akhil Goyal
2018-12-19 13:08   ` Thomas Monjalon
2018-12-19 13:48     ` Zhang, Roy Fan
2018-12-19 20:16   ` [dpdk-dev] [PATCH v3 0/4] use architecure independent macros Fan Zhang
2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 1/4] crypto/aesni_mb: rename files to compatible Fan Zhang
2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 2/4] crypto/aesni_mb: use architecture independent macros Fan Zhang
2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 3/4] doc: update library support version Fan Zhang
2018-12-19 20:16     ` [dpdk-dev] [PATCH v3 4/4] doc: update deprecation notice Fan Zhang
2018-12-20 11:56     ` [dpdk-dev] [PATCH v4 0/3] use architecure independent macros Fan Zhang
2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 1/3] crypto/aesni_mb: rename files Fan Zhang
2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 2/3] crypto/aesni_mb: use architecture independent macros Fan Zhang
2018-12-20 11:56       ` [dpdk-dev] [PATCH v4 3/3] doc: update documentation Fan Zhang
2019-01-09 22:09       ` [dpdk-dev] [PATCH v4 0/3] use architecure independent macros De Lara Guarch, Pablo
