* [dpdk-dev] [PATCH] aesni_gcm: PMD to support AES_GCM crypto operations
@ 2016-01-30 13:09 Declan Doherty
  2016-01-30 16:40 ` O'Driscoll, Tim
  2016-03-08 10:09 ` [dpdk-dev] [PATCH v2] " Pablo de Lara
  0 siblings, 2 replies; 9+ messages in thread
From: Declan Doherty @ 2016-01-30 13:09 UTC
  To: dev

This patch provides the implementation of an AES-NI accelerated crypto PMD
which depends on Intel's multi-buffer library; see the white paper
"Fast Multi-buffer IPsec Implementations on Intel® Architecture Processors".

This PMD supports AES-GCM authenticated encryption and authenticated
decryption using 128-bit AES keys.

The patch also contains the related unit test functions for the implemented
functionality.
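
For reference, below is a minimal usage sketch mirroring the unit tests in
this patch. It is illustrative only: error handling is omitted, and key,
aad_len and tag_len stand in for application-supplied values.

    /* Chain a cipher xform to an auth xform for authenticated encryption */
    struct rte_crypto_xform auth_xform = {
        .type = RTE_CRYPTO_XFORM_AUTH,
        .auth = {
            .algo = RTE_CRYPTO_AUTH_AES_GCM,
            .digest_length = tag_len,
            .add_auth_data_length = aad_len,
        },
    };
    struct rte_crypto_xform cipher_xform = {
        .type = RTE_CRYPTO_XFORM_CIPHER,
        .next = &auth_xform,
        .cipher = {
            .algo = RTE_CRYPTO_CIPHER_AES_GCM,
            .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
            .key = { .data = key, .length = 16 },
        },
    };

    /* Instantiate the vdev and create a session from the xform chain */
    int dev_id = rte_eal_vdev_init(CRYPTODEV_NAME_AESNI_GCM_PMD, NULL);
    struct rte_cryptodev_session *sess =
            rte_cryptodev_session_create(dev_id, &cipher_xform);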

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
 MAINTAINERS                                        |   4 +
 app/test/test_cryptodev.c                          | 462 +++++++++++++++++++
 app/test/test_cryptodev_gcm_test_vectors.h         | 423 +++++++++++++++++
 config/common_linuxapp                             |  11 +-
 config/defconfig_i686-native-linuxapp-gcc          |  10 +
 drivers/crypto/Makefile                            |   1 +
 drivers/crypto/aesni_gcm/Makefile                  |  66 +++
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h           | 127 ++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c           | 498 +++++++++++++++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c       | 292 ++++++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h   | 120 +++++
 .../crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map |   3 +
 lib/librte_cryptodev/rte_cryptodev.h               |   5 +
 mk/rte.app.mk                                      |  11 +-
 14 files changed, 2028 insertions(+), 5 deletions(-)
 create mode 100644 app/test/test_cryptodev_gcm_test_vectors.h
 create mode 100644 drivers/crypto/aesni_gcm/Makefile
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
 create mode 100644 drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index b90aeea..f2b3657 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -348,6 +348,10 @@ Null PMD
 M: Tetsuya Mukawa <mukawa@igel.co.jp>
 F: drivers/net/null/
 
+Intel AES-NI GCM PMD
+M: Declan Doherty <declan.doherty@intel.com>
+F: drivers/crypto/aesni_gcm/
+
 Intel AES-NI Multi-Buffer
 M: Declan Doherty <declan.doherty@intel.com>
 F: drivers/crypto/aesni_mb/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index fd5b7ec..f09635f 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -43,6 +43,7 @@
 
 #include "test.h"
 #include "test_cryptodev.h"
+#include "test_cryptodev_gcm_test_vectors.h"
 
 static enum rte_cryptodev_type gbl_cryptodev_type;
 
@@ -70,6 +71,9 @@ struct crypto_unittest_params {
 	uint8_t *digest;
 };
 
+/*
+ * Round num up to the next multiple of align; align must be a power of
+ * two, e.g. ALIGN_POW2_ROUNDUP(60, 16) == 64.
+ */
+#define ALIGN_POW2_ROUNDUP(num, align) \
+		(((num) + (align) - 1) & ~((align) - 1))
+
 /*
  * Forward declarations.
  */
@@ -188,6 +192,23 @@ testsuite_setup(void)
 		}
 	}
 
+	/* Create 2 AESNI GCM devices if required */
+	if (gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_GCM_PMD) {
+		nb_devs = rte_cryptodev_count_devtype(
+				RTE_CRYPTODEV_AESNI_GCM_PMD);
+		if (nb_devs < 2) {
+			for (i = nb_devs; i < 2; i++) {
+				int dev_id = rte_eal_vdev_init(
+					CRYPTODEV_NAME_AESNI_GCM_PMD, NULL);
+
+				TEST_ASSERT(dev_id >= 0,
+					"Failed to create instance %u of"
+					" PMD %s",
+					i, CRYPTODEV_NAME_AESNI_GCM_PMD);
+			}
+		}
+	}
+
 	nb_devs = rte_cryptodev_count();
 	if (nb_devs < 1) {
 		RTE_LOG(ERR, USER1, "No crypto devices found?");
@@ -1685,6 +1706,392 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
 /* ***** AES-GCM Tests ***** */
 
 static int
+create_gcm_session(uint8_t dev_id, enum rte_crypto_cipher_operation op,
+		const uint8_t *key, const uint8_t key_len,
+		const uint8_t aad_len, const uint8_t auth_len)
+{
+	uint8_t cipher_key[key_len];
+
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+
+	memcpy(cipher_key, key, key_len);
+
+	/* Setup Cipher Parameters */
+	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.next = NULL;
+
+	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+	ut_params->cipher_xform.cipher.op = op;
+	ut_params->cipher_xform.cipher.key.data = cipher_key;
+	ut_params->cipher_xform.cipher.key.length = key_len;
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "key:", key, key_len);
+#endif
+	/* Setup Authentication Parameters */
+	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.next = NULL;
+
+	ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
+
+	ut_params->auth_xform.auth.digest_length = auth_len;
+	ut_params->auth_xform.auth.add_auth_data_length = aad_len;
+	ut_params->auth_xform.auth.key.length = 0;
+	ut_params->auth_xform.auth.key.data = NULL;
+
+	if (op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		ut_params->cipher_xform.next = &ut_params->auth_xform;
+
+		/* Create Crypto session */
+		ut_params->sess = rte_cryptodev_session_create(dev_id,
+				&ut_params->cipher_xform);
+	} else {
+		ut_params->auth_xform.next = &ut_params->cipher_xform;
+
+		/* Create Crypto session */
+		ut_params->sess = rte_cryptodev_session_create(dev_id,
+				&ut_params->auth_xform);
+	}
+
+	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+
+	return 0;
+}
+
+static int
+create_gcm_operation(enum rte_crypto_cipher_operation op,
+		const uint8_t *auth_tag, const unsigned auth_tag_len,
+		const uint8_t *iv, const unsigned iv_len,
+		const uint8_t *aad, const unsigned aad_len,
+		const unsigned data_len, unsigned data_pad_len)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	unsigned iv_pad_len = 0, aad_buffer_len;
+
+	/* Generate Crypto op data structure */
+	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
+			RTE_PKTMBUF_OL_CRYPTO);
+	TEST_ASSERT_NOT_NULL(ut_params->ol,
+			"Failed to allocate pktmbuf offload");
+
+	ut_params->op = &ut_params->ol->op.crypto;
+
+
+	/* digest */
+	ut_params->op->digest.data = (uint8_t *)rte_pktmbuf_append(
+			ut_params->ibuf, auth_tag_len);
+
+	TEST_ASSERT_NOT_NULL(ut_params->op->digest.data,
+			"no room to append auth tag");
+
+	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+			ut_params->ibuf, data_pad_len);
+	ut_params->op->digest.length = auth_tag_len;
+
+	if (op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		rte_memcpy(ut_params->op->digest.data, auth_tag, auth_tag_len);
+
+#ifdef RTE_APP_TEST_DEBUG
+		rte_hexdump(stdout, "digest:",
+				ut_params->op->digest.data,
+				ut_params->op->digest.length);
+#endif
+	}
+
+	/* iv */
+	iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+
+	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+			ut_params->ibuf, iv_pad_len);
+	TEST_ASSERT_NOT_NULL(ut_params->op->iv.data, "no room to prepend iv");
+
+	memset(ut_params->op->iv.data, 0, iv_pad_len);
+	ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	ut_params->op->iv.length = iv_pad_len;
+
+	rte_memcpy(ut_params->op->iv.data, iv, iv_len);
+
+	/*
+	 * CalcY0: for a 96-bit IV the initial counter block Y0 is
+	 * IV || 0^31 || 1, so set the last byte of the zero-padded block to 1
+	 */
+	if (iv_len != 16)
+		ut_params->op->iv.data[15] = 1;
+
+	/*
+	 * Always allocate the aad up to the block size.
+	 * The cryptodev API requires that the array be big enough to hold
+	 * the AAD, plus any space needed to round this up to the nearest
+	 * multiple of the block size (16 bytes).
+	 */
+	aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
+
+	ut_params->op->additional_auth.data = (uint8_t *)rte_pktmbuf_prepend(
+			ut_params->ibuf, aad_buffer_len);
+	TEST_ASSERT_NOT_NULL(ut_params->op->additional_auth.data,
+			"no room to prepend aad");
+	ut_params->op->additional_auth.phys_addr = rte_pktmbuf_mtophys(
+			ut_params->ibuf);
+	ut_params->op->additional_auth.length = aad_len;
+
+	memset(ut_params->op->additional_auth.data, 0, aad_buffer_len);
+	rte_memcpy(ut_params->op->additional_auth.data, aad, aad_len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "iv:", ut_params->op->iv.data, iv_pad_len);
+	rte_hexdump(stdout, "aad:",
+			ut_params->op->additional_auth.data, aad_len);
+#endif
+	ut_params->op->data.to_cipher.length = data_len;
+	ut_params->op->data.to_cipher.offset = aad_buffer_len + iv_pad_len;
+
+	ut_params->op->data.to_hash.length = data_len;
+	ut_params->op->data.to_hash.offset = aad_buffer_len + iv_pad_len;
+
+	return 0;
+}
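+
+/*
+ * create_gcm_operation() leaves the mbuf laid out as
+ * [ aad (padded) | iv (padded) | data | digest ], which is why the
+ * cipher and hash offsets above are aad_buffer_len + iv_pad_len.
+ */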
+
+static int
+test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	int retval;
+
+	uint8_t *plaintext, *ciphertext, *auth_tag;
+	uint16_t plaintext_pad_len;
+
+	/* Create GCM session */
+	retval = create_gcm_session(ts_params->valid_devs[0],
+			RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			tdata->key.data, tdata->key.len,
+			tdata->aad.len, tdata->auth_tag.len);
+	if (retval < 0)
+		return retval;
+
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+	/* clear mbuf payload */
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	/* Append data which is padded to a multiple of the algorithm's
+	 * block size */
+	plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+			plaintext_pad_len);
+	memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
+#endif
+	/* Create GCM operation */
+	retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			tdata->auth_tag.data, tdata->auth_tag.len,
+			tdata->iv.data, tdata->iv.len,
+			tdata->aad.data, tdata->aad.len,
+			tdata->plaintext.len, plaintext_pad_len);
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+
+	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+
+	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
+			ut_params->ibuf);
+	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+
+	if (ut_params->op->dst.m) {
+		ciphertext = rte_pktmbuf_mtod(ut_params->op->dst.m, uint8_t *);
+		auth_tag = rte_pktmbuf_mtod_offset(ut_params->op->dst.m,
+				uint8_t *, plaintext_pad_len);
+	} else {
+		ciphertext = plaintext;
+		auth_tag = plaintext + plaintext_pad_len;
+	}
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+	rte_hexdump(stdout, "auth tag:", auth_tag, tdata->auth_tag.len);
+#endif
+	/* Validate obuf */
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			ciphertext,
+			tdata->ciphertext.data,
+			tdata->ciphertext.len,
+			"GCM Ciphertext data not as expected");
+
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->auth_tag.data,
+			tdata->auth_tag.len,
+			"GCM Generated auth tag not as expected");
+
+	return 0;
+
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_1(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_1);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_2(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_2);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_3(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_3);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_4(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_4);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_5(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_5);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_6(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_6);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_7(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_7);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	int retval;
+
+	uint8_t *plaintext, *ciphertext;
+	uint16_t ciphertext_pad_len;
+
+	/* Create GCM session */
+	retval = create_gcm_session(ts_params->valid_devs[0],
+			RTE_CRYPTO_CIPHER_OP_DECRYPT,
+			tdata->key.data, tdata->key.len,
+			tdata->aad.len, tdata->auth_tag.len);
+	if (retval < 0)
+		return retval;
+
+
+	/* alloc mbuf and set payload */
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	ciphertext_pad_len = RTE_ALIGN_CEIL(tdata->ciphertext.len, 16);
+
+	ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+			ciphertext_pad_len);
+	memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+#endif
+	/* Create GCM operation */
+	retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT,
+			tdata->auth_tag.data, tdata->auth_tag.len,
+			tdata->iv.data, tdata->iv.len,
+			tdata->aad.data, tdata->aad.len,
+			tdata->ciphertext.len, ciphertext_pad_len);
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+
+	rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+
+	ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
+			ut_params->ibuf);
+	TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+
+	if (ut_params->op->dst.m)
+		plaintext = rte_pktmbuf_mtod(ut_params->op->dst.m, uint8_t *);
+	else
+		plaintext = ciphertext;
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "plaintext:", plaintext, tdata->ciphertext.len);
+#endif
+	/* Validate obuf */
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			plaintext,
+			tdata->plaintext.data,
+			tdata->plaintext.len,
+			"GCM plaintext data not as expected");
+
+	TEST_ASSERT_EQUAL(ut_params->ol->op.crypto.status,
+			RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"GCM authentication failed");
+	return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_1(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_1);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_2(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_2);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_3(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_3);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_4(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_4);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_5(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_5);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_6(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_6);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_7(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_7);
+}
+
+static int
 test_stats(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1958,6 +2365,47 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite  = {
 	}
 };
 
+static struct unit_test_suite cryptodev_aesni_gcm_testsuite  = {
+	.suite_name = "Crypto Device AESNI GCM Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		/** AES GCM Authenticated Encryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_7),
+
+		/** AES GCM Authenticated Decryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_7),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
 static int
 test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
 {
@@ -1982,5 +2430,19 @@ static struct test_command cryptodev_aesni_mb_cmd = {
 	.callback = test_cryptodev_aesni_mb,
 };
 
+static int
+test_cryptodev_aesni_gcm(void)
+{
+	gbl_cryptodev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+
+	return unit_test_suite_runner(&cryptodev_aesni_gcm_testsuite);
+}
+
+static struct test_command cryptodev_aesni_gcm_cmd = {
+	.command = "cryptodev_aesni_gcm_autotest",
+	.callback = test_cryptodev_aesni_gcm,
+};
+
 REGISTER_TEST_COMMAND(cryptodev_qat_cmd);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_cmd);
+REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_cmd);
diff --git a/app/test/test_cryptodev_gcm_test_vectors.h b/app/test/test_cryptodev_gcm_test_vectors.h
new file mode 100644
index 0000000..8ae22ba
--- /dev/null
+++ b/app/test/test_cryptodev_gcm_test_vectors.h
@@ -0,0 +1,423 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	 * Redistributions of source code must retain the above copyright
+ *	   notice, this list of conditions and the following disclaimer.
+ *	 * Redistributions in binary form must reproduce the above copyright
+ *	   notice, this list of conditions and the following disclaimer in
+ *	   the documentation and/or other materials provided with the
+ *	   distribution.
+ *	 * Neither the name of Intel Corporation nor the names of its
+ *	   contributors may be used to endorse or promote products derived
+ *	   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+
+struct gcm_test_data {
+	struct {
+		uint8_t data[64];
+		unsigned len;
+	} key;
+
+	struct {
+		uint8_t data[64] __rte_aligned(16);
+		unsigned len;
+	} iv;
+
+	struct {
+		uint8_t data[64];
+		unsigned len;
+	} aad;
+
+	struct {
+		uint8_t data[1024];
+		unsigned len;
+	} plaintext;
+
+	struct {
+		uint8_t data[1024];
+		unsigned len;
+	} ciphertext;
+
+	struct {
+		uint8_t data[16];
+		unsigned len;
+	} auth_tag;
+};
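+
+/*
+ * The vectors below appear to correspond to the AES-128 test cases in the
+ * GCM specification (McGrew & Viega, "The Galois/Counter Mode of
+ * Operation (GCM)").
+ */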
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_1 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0x00 },
+		.len = 0
+	},
+	.ciphertext = {
+		.data = {
+			0x00
+		},
+		.len = 0
+	},
+	.auth_tag = {
+		.data = {
+			0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61,
+			0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_2 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.ciphertext = {
+		.data = {
+			0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92,
+			0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78 },
+		.len = 16
+	},
+	.auth_tag = {
+		.data = {
+			0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd,
+			0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_3 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
+		.len = 64
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85
+		},
+		.len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
+			0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_4 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 8
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xA2, 0xA4, 0x35, 0x75, 0xDC, 0xB0, 0x57, 0x74,
+			0x07, 0x02, 0x30, 0xC2, 0xE7, 0x52, 0x02, 0x00
+		},
+		.len = 16
+	}
+
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_5 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef },
+		.len = 8
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xC5, 0x2D, 0xFB, 0x54, 0xAF, 0xBB, 0x07, 0xA1,
+			0x9A, 0xFF, 0xBE, 0xE0, 0x61, 0x4C, 0xE7, 0xA5
+		},
+		.len = 16
+	}
+
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_6 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0x74, 0xFC, 0xFA, 0x29, 0x3E, 0x60, 0xCC, 0x66,
+			0x09, 0xD6, 0xFD, 0x00, 0xC8, 0x86, 0xD5, 0x42
+		},
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_7 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+			0xfe, 0xed, 0xfa, 0xce
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xE9, 0xE4, 0xAB, 0x76, 0xB7, 0xFF, 0xEA, 0xDC,
+			0x69, 0x79, 0x38, 0xA2, 0x0D, 0xCA, 0xF5, 0x92
+		},
+		.len = 16
+	}
+};
+
+
+#endif /* TEST_CRYPTODEV_GCM_TEST_VECTORS_H_ */
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 74bc515..980959a 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -336,7 +336,7 @@ CONFIG_RTE_CRYPTODEV_NAME_LEN=64
 #
 # Compile PMD for QuickAssist based devices
 #
-CONFIG_RTE_LIBRTE_PMD_QAT=n
+CONFIG_RTE_LIBRTE_PMD_QAT=y
 CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_INIT=n
 CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_TX=n
 CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_RX=n
@@ -350,11 +350,18 @@ CONFIG_RTE_QAT_PMD_MAX_NB_SESSIONS=2048
 #
 # Compile PMD for AESNI backed device
 #
-CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y
 CONFIG_RTE_LIBRTE_PMD_AESNI_MB_DEBUG=n
 CONFIG_RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS=8
 CONFIG_RTE_AESNI_MB_PMD_MAX_NB_SESSIONS=2048
 
+#
+# Compile PMD for AESNI GCM device
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=y
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM_DEBUG=n
+CONFIG_RTE_AESNI_GCM_PMD_MAX_NB_QUEUE_PAIRS=8
+CONFIG_RTE_AESNI_GCM_PMD_MAX_NB_SESSIONS=2048
+
 #
 # Compile librte_ring
 #
diff --git a/config/defconfig_i686-native-linuxapp-gcc b/config/defconfig_i686-native-linuxapp-gcc
index a90de9b..8563b30 100644
--- a/config/defconfig_i686-native-linuxapp-gcc
+++ b/config/defconfig_i686-native-linuxapp-gcc
@@ -49,3 +49,13 @@ CONFIG_RTE_LIBRTE_KNI=n
 # Vectorized PMD is not supported on 32-bit
 #
 CONFIG_RTE_IXGBE_INC_VECTOR=n
+
+#
+# AES-NI multi-buffer PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
+
+#
+# AES-NI GCM PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index d07ee96..9cd44a4 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -32,6 +32,7 @@
 include $(RTE_SDK)/mk/rte.vars.mk
 
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
 
 include $(RTE_SDK)/mk/rte.sharelib.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
new file mode 100644
index 0000000..0832259
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -0,0 +1,66 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
+
+# library name
+LIB = librte_pmd_aesni_gcm.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_gcm_version.map
+
+# external library include paths
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
new file mode 100644
index 0000000..c399068
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -0,0 +1,127 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AESNI_GCM_OPS_H_
+#define _AESNI_GCM_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <gcm_defines.h>
+#include <aux_funcs.h>
+
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+	RTE_AESNI_GCM_SSE,
+	RTE_AESNI_GCM_AVX,
+	RTE_AESNI_GCM_AVX2
+};
+
+typedef void (*aes_keyexp_128_enc_t)(void *key, void *enc_exp_keys);
+
+typedef void (*aesni_gcm_t)(gcm_data *my_ctx_data, u8 *out, const u8 *in,
+		u64 plaintext_len, u8 *iv, const u8 *aad, u64 aad_len,
+		u8 *auth_tag, u64 auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(gcm_data *my_ctx_data, u8 *hash_subkey);
+
+/** GCM library function pointer table */
+struct aesni_gcm_ops {
+	struct {
+		struct {
+			aes_keyexp_128_enc_t aes128_enc;
+			/**< AES128 enc key expansion */
+		} keyexp;
+		/**< Key expansion functions */
+	} aux; /**< Auxiliary functions */
+
+	struct {
+		aesni_gcm_t enc;	/**< GCM encode function pointer */
+		aesni_gcm_t dec;	/**< GCM decode function pointer */
+		aesni_gcm_precomp_t precomp;	/**< GCM pre-compute */
+	} gcm; /**< GCM functions */
+};
+
+
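+/*
+ * Per-vector-mode function table: the PMD selects the entry matching the
+ * CPU's vector support at device creation time and each queue pair keeps
+ * a pointer to the chosen entry.
+ */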
+static const struct aesni_gcm_ops gcm_ops[] = {
+	[RTE_AESNI_GCM_NOT_SUPPORTED] = {
+		.aux = {
+			.keyexp = {
+				NULL
+			}
+		},
+		.gcm = {
+			NULL
+		}
+	},
+	[RTE_AESNI_GCM_SSE] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_sse
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_sse,
+			aesni_gcm_dec_sse,
+			aesni_gcm_precomp_sse
+		}
+	},
+	[RTE_AESNI_GCM_AVX] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen2,
+			aesni_gcm_dec_avx_gen2,
+			aesni_gcm_precomp_avx_gen2
+		}
+	},
+	[RTE_AESNI_GCM_AVX2] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx2,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen4,
+			aesni_gcm_dec_avx_gen4,
+			aesni_gcm_precomp_avx_gen4
+		}
+	}
+};
+
+
+#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
new file mode 100644
index 0000000..312c3ff
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -0,0 +1,498 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <openssl/aes.h>
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_mbuf_offload.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/**
+ * Global static parameter used to create a unique name for each AES-NI GCM
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+	int ret;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_GCM_PMD,
+			unique_name_id++);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int
+aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
+		uint8_t *aeskey, unsigned aeskey_length)
+{
+	uint8_t key[aeskey_length] __rte_aligned(16);
+	AES_KEY enc_key;
+
+	if (hsubkey_length % 16 != 0 || aeskey_length % 16 != 0)
+		return -EFAULT;
+
+	memcpy(key, aeskey, aeskey_length);
+
+	if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
+		return -EFAULT;
+
+	AES_encrypt(hsubkey, hsubkey, &enc_key);
+
+	return 0;
+}
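+
+/*
+ * Note: GCM's hash subkey is H = E_K(0^128); the caller passes in a zeroed
+ * hsubkey buffer, which aesni_gcm_calculate_hash_sub_key() encrypts in
+ * place.
+ */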
+
+/** Determine the GCM operation from the xform chain order */
+static int
+aesni_gcm_get_mode(const struct rte_crypto_xform *xform)
+{
+	/*
+	 * GCM only supports authenticated encryption or authenticated
+	 * decryption, all other options are invalid, so we must have exactly
+	 * 2 xform structs chained together
+	 */
+	if (xform->next == NULL || xform->next->next != NULL)
+		return -1;
+
+	if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_XFORM_AUTH) {
+		return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+	}
+
+	if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_XFORM_CIPHER) {
+		return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+	}
+
+	return -1;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_xform *xform)
+{
+	const struct rte_crypto_xform *auth_xform = NULL;
+	const struct rte_crypto_xform *cipher_xform = NULL;
+
+	uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	switch (aesni_gcm_get_mode(xform)) {
+	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	default:
+		GCM_LOG_ERR("Unsupported operation chain order parameter");
+		return -EINVAL;
+	}
+
+	/* We only support AES GCM */
+	if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM &&
+			auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+		return -EINVAL;
+
+	/* Select cipher direction */
+	if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
+			cipher_xform->cipher.op !=
+					RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
+				"(DECRYPT) specified are an invalid selection");
+		return -EINVAL;
+	} else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
+			cipher_xform->cipher.op !=
+					RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
+				"(ENCRYPT) specified are an invalid selection");
+		return -EINVAL;
+	}
+
+	/* Expand GCM AES128 key */
+	(*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
+			sess->gdata.expanded_keys);
+
+	/* Calculate hash sub key here */
+	aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
+			cipher_xform->cipher.key.data,
+			cipher_xform->cipher.key.length);
+
+	/* Calculate GCM pre-compute */
+	(*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+
+	return 0;
+}
+
+/** Get gcm session */
+static struct aesni_gcm_session *
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *crypto_op)
+{
+	struct aesni_gcm_session *sess = NULL;
+
+	if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (unlikely(crypto_op->session->type !=
+				RTE_CRYPTODEV_AESNI_GCM_PMD))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)crypto_op->session->_private;
+	} else  {
+		void *_sess;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)
+			((struct rte_cryptodev_session *)_sess)->_private;
+
+		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+				sess, crypto_op->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			sess = NULL;
+		}
+	}
+	return sess;
+}
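+
+/*
+ * For a session-less operation a temporary session is taken from the
+ * queue pair's session mempool and configured from the op's xform chain;
+ * handle_completed_gcm_crypto_op() returns it to the pool afterwards.
+ */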
+
+/**
+ * Process a crypto operation by invoking the GCM library encode/decode
+ * routine selected for the session.
+ *
+ * @param	qp	queue pair
+ * @param	m	mbuf to process
+ * @param	c_op	crypto operation to process
+ * @param	session	GCM session to use for the operation
+ *
+ * @return
+ * - 0 on success
+ * - -1 on error (invalid iv/aad/digest length or unknown session op)
+ */
+static int
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_mbuf *m,
+		struct rte_crypto_op *c_op, struct aesni_gcm_session *session)
+{
+	uint8_t *src, *dst;
+
+	src = rte_pktmbuf_mtod(m, uint8_t *) + c_op->data.to_cipher.offset;
+	dst = c_op->dst.m ?
+			rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
+			c_op->dst.offset :
+			rte_pktmbuf_mtod(m, uint8_t *) +
+			c_op->data.to_cipher.offset;
+
+	/* sanity checks */
+	if (c_op->iv.length != 16 && c_op->iv.length != 0) {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	if (c_op->additional_auth.length != 12 &&
+			c_op->additional_auth.length != 8 &&
+			c_op->additional_auth.length != 0) {
+		GCM_LOG_ERR("aad");
+		return -1;
+	}
+
+	if (c_op->digest.length != 16 && c_op->digest.length != 12 &&
+			c_op->digest.length != 8 &&
+			c_op->digest.length != 0) {
+		GCM_LOG_ERR("digest");
+		return -1;
+	}
+
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
+
+		(*qp->ops->gcm.enc)(&session->gdata, dst, src,
+				(uint64_t)c_op->data.to_cipher.length,
+				c_op->iv.data,
+				c_op->additional_auth.data,
+				(uint64_t)c_op->additional_auth.length,
+				c_op->digest.data,
+				(uint64_t)c_op->digest.length);
+	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+		/*
+		 * Append space for the computed tag; it is compared against
+		 * the supplied digest and trimmed again during post-processing
+		 */
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+				c_op->digest.length);
+
+		if (!auth_tag) {
+			GCM_LOG_ERR("auth_tag");
+			return -1;
+		}
+
+		(*qp->ops->gcm.dec)(&session->gdata, dst, src,
+				(uint64_t)c_op->data.to_cipher.length,
+				c_op->iv.data,
+				c_op->additional_auth.data,
+				(uint64_t)c_op->additional_auth.length,
+				auth_tag,
+				(uint64_t)c_op->digest.length);
+	} else {
+		GCM_LOG_ERR("session op");
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Post-process a completed crypto operation: set its status and, for
+ * authenticated decryption, compare the computed tag appended to the mbuf
+ * with the supplied digest and trim it off again.
+ *
+ * @param m	mbuf which was processed
+ * @param c_op	crypto operation which was processed
+ */
+static void
+post_process_gcm_crypto_op(struct rte_mbuf *m, struct rte_crypto_op *c_op)
+{
+	struct aesni_gcm_session *session =
+			(struct aesni_gcm_session *)c_op->session->_private;
+
+	c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Verify digest if required */
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+
+		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
+				m->data_len - c_op->digest.length);
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		rte_hexdump(stdout, "auth tag (orig):",
+				c_op->digest.data, c_op->digest.length);
+		rte_hexdump(stdout, "auth tag (calc):",
+				tag, c_op->digest.length);
+#endif
+
+		if (memcmp(tag, c_op->digest.data, c_op->digest.length) != 0)
+			c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+		/* trim area used for digest from mbuf */
+		rte_pktmbuf_trim(m, c_op->digest.length);
+	}
+}
+
+/**
+ * Complete a GCM request: post-process the operation, release the session
+ * of a session-less operation and enqueue the mbuf on the processed ring
+ *
+ * @param qp	queue pair which processed the request
+ * @param m	mbuf which was processed
+ * @param c_op	completed crypto operation
+ */
+static void
+handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_mbuf *m,
+		struct rte_crypto_op *c_op)
+{
+	post_process_gcm_crypto_op(m, c_op);
+
+	/* Free session if a session-less crypto op */
+	if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+		rte_mempool_put(qp->sess_mp, c_op->session);
+		c_op->session = NULL;
+	}
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)m);
+}
+
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
+		uint16_t nb_bufs)
+{
+	struct rte_mbuf_offload *ol;
+	struct rte_crypto_op *c_op;
+
+	struct aesni_gcm_session *sess;
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	int i, retval = 0;
+
+	for (i = 0; i < nb_bufs; i++) {
+		ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+		if (unlikely(ol == NULL)) {
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+		c_op = &ol->op.crypto;
+
+		sess = aesni_gcm_get_session(qp, c_op);
+		if (unlikely(sess == NULL)) {
+			c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		retval = process_gcm_crypto_op(qp, bufs[i], c_op, sess);
+		if (retval < 0) {
+			c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		handle_completed_gcm_crypto_op(qp, bufs[i], c_op);
+
+		qp->qp_stats.enqueued_count++;
+	}
+	return i;
+}
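+
+/*
+ * Note: this PMD does all crypto processing synchronously in enqueue;
+ * dequeue simply drains the ring of already processed packets.
+ */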
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+		struct rte_mbuf **bufs,	uint16_t nb_bufs)
+{
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	unsigned nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+			(void **)bufs, nb_bufs);
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
+
+static int aesni_gcm_uninit(const char *name);
+
+static int
+aesni_gcm_create(const char *name, unsigned socket_id)
+{
+	struct rte_cryptodev *dev;
+	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct aesni_gcm_private *internals;
+	enum aesni_gcm_vector_mode vector_mode;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+		GCM_LOG_ERR("AES instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_GCM_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_GCM_AVX;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+		vector_mode = RTE_AESNI_GCM_SSE;
+	else {
+		GCM_LOG_ERR("Vector instructions are not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* create a unique device name */
+	if (create_unique_device_name(crypto_dev_name,
+			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+		GCM_LOG_ERR("failed to create unique cryptodev name");
+		return -EINVAL;
+	}
+
+
+	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+			sizeof(struct aesni_gcm_private), socket_id);
+	if (dev == NULL) {
+		GCM_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+	dev->dev_ops = rte_aesni_gcm_pmd_ops;
+
+	/* register rx/tx burst functions for data path */
+	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
+
+	/* Set vector instructions mode supported */
+	internals = dev->data->dev_private;
+
+	internals->vector_mode = vector_mode;
+
+	internals->max_nb_queue_pairs = RTE_AESNI_GCM_PMD_MAX_NB_QUEUE_PAIRS;
+	internals->max_nb_sessions = RTE_AESNI_GCM_PMD_MAX_NB_SESSIONS;
+
+	return dev->data->dev_id;
+init_error:
+	GCM_LOG_ERR("driver %s: create failed", name);
+
+	aesni_gcm_uninit(crypto_dev_name);
+	return -EFAULT;
+}
+
+static int
+aesni_gcm_init(const char *name,
+		const char *params __rte_unused)
+{
+	GCM_LOG_INFO("Initialising %s\n", name);
+
+	return aesni_gcm_create(name, rte_socket_id());
+}
+
+static int
+aesni_gcm_uninit(const char *name)
+{
+	if (name == NULL)
+		return -EINVAL;
+
+	GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
+			name, rte_socket_id());
+
+	return 0;
+}
+
+static struct rte_driver aesni_gcm_pmd_drv = {
+	.name = CRYPTODEV_NAME_AESNI_GCM_PMD,
+	.type = PMD_VDEV,
+	.init = aesni_gcm_init,
+	.uninit = aesni_gcm_uninit
+};
+
+PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv);
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
new file mode 100644
index 0000000..79b2d4d
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -0,0 +1,292 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/** Configure device */
+static int
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+
+/** Get device info */
+static void
+aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->dev_type = dev->dev_type;
+
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		dev_info->max_nb_sessions = internals->max_nb_sessions;
+	}
+}
+
+/** Release queue pair */
+static int
+aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		rte_free(dev->data->queue_pairs[qp_id]);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct aesni_gcm_qp *qp)
+{
+	unsigned n = snprintf(qp->name, sizeof(qp->name),
+			"aesni_gcm_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
+		unsigned ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->prod.size >= ring_size) {
+			GCM_LOG_INFO("Reusing existing ring %s for processed"
+					" packets", qp->name);
+			return r;
+		}
+
+		GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+				" packets", qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id)
+{
+	struct aesni_gcm_qp *qp = NULL;
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		aesni_gcm_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("AES-NI GCM PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->ops = &gcm_ops[internals->vector_mode];
+
+	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	qp->sess_mp = dev->data->session_pool;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp)
+		rte_free(qp);
+
+	return -1;
+}
+
+/** Start queue pair */
+static int
+aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned
+aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct aesni_gcm_session);
+}
+
+/** Configure an aesni gcm session from a crypto xform chain */
+static void *
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_xform *xform,	void *sess)
+{
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	if (unlikely(sess == NULL)) {
+		GCM_LOG_ERR("invalid session struct");
+		return NULL;
+	}
+
+	if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
+			sess, xform) != 0) {
+		GCM_LOG_ERR("failed configure session parameters");
+		return NULL;
+	}
+
+	return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+	if (sess)
+		memset(sess, 0, sizeof(struct aesni_gcm_session));
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+		.dev_configure		= aesni_gcm_pmd_config,
+		.dev_start		= aesni_gcm_pmd_start,
+		.dev_stop		= aesni_gcm_pmd_stop,
+		.dev_close		= aesni_gcm_pmd_close,
+
+		.stats_get		= aesni_gcm_pmd_stats_get,
+		.stats_reset		= aesni_gcm_pmd_stats_reset,
+
+		.dev_infos_get		= aesni_gcm_pmd_info_get,
+
+		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
+		.queue_pair_release	= aesni_gcm_pmd_qp_release,
+		.queue_pair_start	= aesni_gcm_pmd_qp_start,
+		.queue_pair_stop	= aesni_gcm_pmd_qp_stop,
+		.queue_pair_count	= aesni_gcm_pmd_qp_count,
+
+		.session_get_size	= aesni_gcm_pmd_session_get_size,
+		.session_configure	= aesni_gcm_pmd_session_configure,
+		.session_clear		= aesni_gcm_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
new file mode 100644
index 0000000..749c01b
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -0,0 +1,120 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+
+#include "aesni_gcm_ops.h"
+
+#define GCM_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+#define GCM_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#define GCM_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+#else
+#define GCM_LOG_INFO(fmt, args...)
+#define GCM_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each virtual AESNI GCM device */
+struct aesni_gcm_private {
+	enum aesni_gcm_vector_mode vector_mode;
+	/**< Vector mode */
+	unsigned max_nb_queue_pairs;
+	/**< Max number of queue pairs supported by device */
+	unsigned max_nb_sessions;
+	/**< Max number of sessions supported by device */
+};
+
+struct aesni_gcm_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	const struct aesni_gcm_ops *ops;
+	/**< Architecture dependent function pointer table of the gcm APIs */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+enum aesni_gcm_operation {
+	AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
+	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+	enum aesni_gcm_operation op;
+	/**< GCM operation type */
+	struct gcm_data gdata __rte_cache_aligned;
+	/**< GCM parameters */
+};
+
+
+/**
+ * Setup GCM session parameters
+ * @param	ops	gcm ops function pointer table
+ * @param	sess	aesni gcm session structure
+ * @param	xform	crypto transform chain
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns error code < 0
+ */
+extern int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_xform *xform);
+
+
+/** Device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+
+
+#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
new file mode 100644
index 0000000..3871202
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
@@ -0,0 +1,3 @@
+DPDK_2.3 {
+	local: *;
+};
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..b21b632 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -56,6 +56,8 @@ extern "C" {
 #define CRYPTODEV_NAME_NULL_PMD		("cryptodev_null_pmd")
 /**< Null crypto PMD device name */
 #define CRYPTODEV_NAME_AESNI_MB_PMD	("cryptodev_aesni_mb_pmd")
 /**< AES-NI Multi buffer PMD device name */
+#define CRYPTODEV_NAME_AESNI_GCM_PMD	("cryptodev_aesni_gcm_pmd")
+/**< AES-NI GCM PMD device name */
 #define CRYPTODEV_NAME_QAT_PMD		("cryptodev_qat_pmd")
 /**< Intel QAT PMD device name */
@@ -64,9 +66,12 @@ extern "C" {
 enum rte_cryptodev_type {
 	RTE_CRYPTODEV_NULL_PMD = 1,	/**< Null crypto PMD */
 	RTE_CRYPTODEV_AESNI_MB_PMD,	/**< AES-NI multi buffer PMD */
+	RTE_CRYPTODEV_AESNI_GCM_PMD,	/**< AES-NI GCM PMD */
 	RTE_CRYPTODEV_QAT_PMD,		/**< QAT PMD */
 };
 
+extern const char **rte_cryptodev_names;
+
 /* Logging Macros */
 
 #define CDEV_LOG_ERR(fmt, args...)					\
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 8ecab41..0a0b137 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -154,10 +154,15 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP)       += -lrte_pmd_pcap
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET)  += -lrte_pmd_af_packet
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL)       += -lrte_pmd_null
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lrte_pmd_qat
-
-# AESNI MULTI BUFFER is dependent on the IPSec_MB library
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -lrte_pmd_aesni_mb
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) 	+= -lrte_pmd_aesni_gcm
+
+# AESNI MULTI BUFFER / GCM PMDs are dependent on the IPSec_MB library
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_MB),y)
+_LDLIBS-y 	+= -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
+_LDLIBS-y 	+= -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+endif
 
 endif # ! $(CONFIG_RTE_BUILD_SHARED_LIB)
 
-- 
2.5.0

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH] aesni_gcm: PMD to support AES_GCM crypto operations
  2016-01-30 13:09 [dpdk-dev] [PATCH] aesni_gcm: PMD to support AES_GCM crypto operations Declan Doherty
@ 2016-01-30 16:40 ` O'Driscoll, Tim
  2016-03-08 10:09 ` [dpdk-dev] [PATCH v2] " Pablo de Lara
  1 sibling, 0 replies; 9+ messages in thread
From: O'Driscoll, Tim @ 2016-01-30 16:40 UTC (permalink / raw)
  To: Doherty, Declan, dev


> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Declan Doherty
> Sent: Saturday, January 30, 2016 1:10 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH] aesni_gcm: PMD to support AES_GCM crypto
> operations
> 
> This patch provides the implementation of an AES-NI accelerated crypto
> PMD
> which is dependent on Intel's multi-buffer library, see the white paper
> "Fast Multi-buffer IPsec Implementations on Intel®  Architecture
> Processors"
> 
> This PMD supports AES_GCM authenticated encryption and authenticated
> decryption using
> 128-bit AES keys
> 
> The patch also contains the related unit tests functions for the
> implemented functionality
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> ---
>  MAINTAINERS                                        |   4 +
>  app/test/test_cryptodev.c                          | 462
> +++++++++++++++++++
>  app/test/test_cryptodev_gcm_test_vectors.h         | 423
> +++++++++++++++++
>  config/common_linuxapp                             |  11 +-
>  config/defconfig_i686-native-linuxapp-gcc          |  10 +
>  drivers/crypto/Makefile                            |   1 +
>  drivers/crypto/aesni_gcm/Makefile                  |  66 +++
>  drivers/crypto/aesni_gcm/aesni_gcm_ops.h           | 127 ++++++
>  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c           | 498
> +++++++++++++++++++++
>  drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c       | 292 ++++++++++++
>  drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h   | 120 +++++
>  .../crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map |   3 +
>  lib/librte_cryptodev/rte_cryptodev.h               |   5 +
>  mk/rte.app.mk                                      |  11 +-
>  14 files changed, 2028 insertions(+), 5 deletions(-)
>  create mode 100644 app/test/test_cryptodev_gcm_test_vectors.h
>  create mode 100644 drivers/crypto/aesni_gcm/Makefile
>  create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h
>  create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
>  create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
>  create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
>  create mode 100644
> drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
> 

There should be an update to the Crypto Device Drivers guide for the new PMD.


Tim

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-dev] [PATCH v2] aesni_gcm: PMD to support AES_GCM crypto operations
  2016-01-30 13:09 [dpdk-dev] [PATCH] aesni_gcm: PMD to support AES_GCM crypto operations Declan Doherty
  2016-01-30 16:40 ` O'Driscoll, Tim
@ 2016-03-08 10:09 ` Pablo de Lara
  2016-03-08 11:22   ` De Lara Guarch, Pablo
  2016-03-08 11:26   ` [dpdk-dev] [PATCH v3] " Pablo de Lara
  1 sibling, 2 replies; 9+ messages in thread
From: Pablo de Lara @ 2016-03-08 10:09 UTC (permalink / raw)
  To: dev

From: Declan Doherty <declan.doherty@intel.com>

This patch provides the implementation of an AES-NI accelerated crypto PMD
which is dependent on Intel's multi-buffer library, see the white paper
"Fast Multi-buffer IPsec Implementations on Intel®  Architecture  Processors"

This PMD supports AES_GCM authenticated encryption and authenticated decryption using
128-bit AES keys

The patch also contains the related unit tests functions for the implemented functionality

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---

This patch depends on "pmd/snow3g: add new SNOW 3G SW PMD" patch
(http://dpdk.org/dev/patchwork/patch/11151/).

Changes in v2:

- Rebased against crypto API changes
- Removed static config options and allow user to provide them
  as virtual device parameters
- Changed DPDK version references from 2.3 to 16.04 
- Added missing library dependency
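
As a usage sketch (illustrative only; the vdev argument names below are
assumed to mirror the removed static config options and may differ in the
final patch), the PMD can be instantiated and exercised from the test app:

  ./app/test -c 0x3 -n 4 \
      --vdev="cryptodev_aesni_gcm_pmd,max_nb_queue_pairs=2,max_nb_sessions=128"
  RTE>> cryptodev_aesni_gcm_autotest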

 MAINTAINERS                                        |   4 +
 app/test/test_cryptodev.c                          | 466 +++++++++++++++++++
 app/test/test_cryptodev_gcm_test_vectors.h         | 423 +++++++++++++++++
 config/common_base                                 |   6 +
 config/defconfig_i686-native-linuxapp-gcc          |  10 +
 config/defconfig_i686-native-linuxapp-icc          |  10 +
 drivers/crypto/Makefile                            |   1 +
 drivers/crypto/aesni_gcm/Makefile                  |  67 +++
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h           | 127 ++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c           | 505 +++++++++++++++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c       | 292 ++++++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h   | 120 +++++
 .../crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map |   3 +
 lib/librte_cryptodev/rte_cryptodev.h               |   3 +
 mk/rte.app.mk                                      |  19 +-
 15 files changed, 2052 insertions(+), 4 deletions(-)
 create mode 100644 app/test/test_cryptodev_gcm_test_vectors.h
 create mode 100644 drivers/crypto/aesni_gcm/Makefile
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
 create mode 100644 drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index c028e67..076757d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -348,6 +348,10 @@ Null PMD
 M: Tetsuya Mukawa <mukawa@igel.co.jp>
 F: drivers/net/null/
 
+Intel AES-NI GCM PMD
+M: Declan Doherty <declan.doherty@intel.com>
+F: drivers/crypto/aesni_gcm/
+
 Intel AES-NI Multi-Buffer
 M: Declan Doherty <declan.doherty@intel.com>
 F: drivers/crypto/aesni_mb/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 1f822f0..d7e80c4 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -44,6 +44,8 @@
 #include "test_cryptodev.h"
 #include "test_cryptodev_snow3g_test_vectors.h"
 #include "test_cryptodev_snow3g_hash_test_vectors.h"
+#include "test_cryptodev_gcm_test_vectors.h"
+
 static enum rte_cryptodev_type gbl_cryptodev_type;
 
 struct crypto_testsuite_params {
@@ -195,6 +197,21 @@ testsuite_setup(void)
 		}
 	}
 
+	/* Create 2 AESNI GCM devices if required */
+	if (gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_GCM_PMD) {
+		nb_devs = rte_cryptodev_count_devtype(
+				RTE_CRYPTODEV_AESNI_GCM_PMD);
+		if (nb_devs < 2) {
+			for (i = nb_devs; i < 2; i++) {
+				TEST_ASSERT_SUCCESS(rte_eal_vdev_init(
+					CRYPTODEV_NAME_AESNI_GCM_PMD, NULL),
+					"Failed to create instance %u of"
+					" pmd : %s",
+					i, CRYPTODEV_NAME_AESNI_GCM_PMD);
+			}
+		}
+	}
+
 	/* Create 2 Snow3G devices if required */
 	if (gbl_cryptodev_type == RTE_CRYPTODEV_SNOW3G_PMD) {
 		nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_SNOW3G_PMD);
@@ -2761,6 +2778,400 @@ test_snow3g_encrypted_authentication_test_case_1(void)
 /* ***** AES-GCM Tests ***** */
 
 static int
+create_gcm_session(uint8_t dev_id, enum rte_crypto_cipher_operation op,
+		const uint8_t *key, const uint8_t key_len,
+		const uint8_t aad_len, const uint8_t auth_len)
+{
+	uint8_t cipher_key[key_len];
+
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+
+	memcpy(cipher_key, key, key_len);
+
+	/* Setup Cipher Parameters */
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	ut_params->cipher_xform.next = NULL;
+
+	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+	ut_params->cipher_xform.cipher.op = op;
+	ut_params->cipher_xform.cipher.key.data = cipher_key;
+	ut_params->cipher_xform.cipher.key.length = key_len;
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "key:", key, key_len);
+#endif
+	/* Setup Authentication Parameters */
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	ut_params->auth_xform.next = NULL;
+
+	ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
+
+	ut_params->auth_xform.auth.digest_length = auth_len;
+	ut_params->auth_xform.auth.add_auth_data_length = aad_len;
+	ut_params->auth_xform.auth.key.length = 0;
+	ut_params->auth_xform.auth.key.data = NULL;
+
+	if (op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		ut_params->cipher_xform.next = &ut_params->auth_xform;
+
+		/* Create Crypto session*/
+		ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
+				&ut_params->cipher_xform);
+	} else {/* Create Crypto session*/
+		ut_params->auth_xform.next = &ut_params->cipher_xform;
+		ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
+				&ut_params->auth_xform);
+	}
+
+	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+
+	return 0;
+}
+
+static int
+create_gcm_operation(enum rte_crypto_cipher_operation op,
+		const uint8_t *auth_tag, const unsigned auth_tag_len,
+		const uint8_t *iv, const unsigned iv_len,
+		const uint8_t *aad, const unsigned aad_len,
+		const unsigned data_len, unsigned data_pad_len)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	unsigned iv_pad_len = 0, aad_buffer_len;
+
+	/* Generate Crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
+
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+	sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+			ut_params->ibuf, auth_tag_len);
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+			ut_params->ibuf, data_pad_len);
+	sym_op->auth.digest.length = auth_tag_len;
+
+	if (op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		rte_memcpy(sym_op->auth.digest.data, auth_tag, auth_tag_len);
+#ifdef RTE_APP_TEST_DEBUG
+		rte_hexdump(stdout, "digest:",
+				ut_params->op->digest.data,
+				ut_params->op->digest.length);
+#endif
+	}
+
+	/* iv */
+	iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+			ut_params->ibuf, iv_pad_len);
+	TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
+
+	memset(sym_op->cipher.iv.data, 0, iv_pad_len);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = iv_pad_len;
+
+	rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
+
+	/*
+	 * Calculate Y0: with a 96-bit IV, GCM defines the initial counter
+	 * block as J0 = IV || 0^31 || 1, hence the last byte is set to 1
+	 */
+	if (iv_len != 16)
+		sym_op->cipher.iv.data[15] = 1;
+
+	/*
+	 * Always allocate the AAD up to the block size. The cryptodev API
+	 * requires that the array be big enough to hold the AAD plus any
+	 * padding needed to round it up to the nearest multiple of the
+	 * block size (16 bytes).
+	 */
+	aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
+
+	sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
+			ut_params->ibuf, aad_buffer_len);
+	TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
+			"no room to prepend aad");
+	sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
+			ut_params->ibuf);
+	sym_op->auth.aad.length = aad_len;
+
+	memset(sym_op->auth.aad.data, 0, aad_buffer_len);
+	rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "iv:", ut_params->op->iv.data, iv_pad_len);
+	rte_hexdump(stdout, "aad:",
+			ut_params->op->additional_auth.data, aad_len);
+#endif
+	sym_op->cipher.data.length = data_len;
+	sym_op->cipher.data.offset = aad_buffer_len + iv_pad_len;
+
+	sym_op->auth.data.offset = aad_buffer_len + iv_pad_len;
+	sym_op->auth.data.length = data_len;
+
+	return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	int retval;
+
+	uint8_t *plaintext, *ciphertext, *auth_tag;
+	uint16_t plaintext_pad_len;
+
+	/* Create GCM session */
+	retval = create_gcm_session(ts_params->valid_devs[0],
+			RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			tdata->key.data, tdata->key.len,
+			tdata->aad.len, tdata->auth_tag.len);
+	if (retval < 0)
+		return retval;
+
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+	/* clear mbuf payload */
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	/*
+	 * Append data which is padded to a multiple
+	 * of the algorithms block size
+	 */
+	plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+			plaintext_pad_len);
+	memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
+#endif
+	/* Create GCM operation */
+	retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			tdata->auth_tag.data, tdata->auth_tag.len,
+			tdata->iv.data, tdata->iv.len,
+			tdata->aad.data, tdata->aad.len,
+			tdata->plaintext.len, plaintext_pad_len);
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	/* Process crypto operation */
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	if (ut_params->op->sym->m_dst) {
+		ciphertext = rte_pktmbuf_mtod(ut_params->op->sym->m_dst,
+				uint8_t *);
+		auth_tag = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_dst,
+				uint8_t *, plaintext_pad_len);
+	} else {
+		ciphertext = plaintext;
+		auth_tag = plaintext + plaintext_pad_len;
+	}
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+	rte_hexdump(stdout, "auth tag:", auth_tag, tdata->auth_tag.len);
+#endif
+	/* Validate obuf */
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			ciphertext,
+			tdata->ciphertext.data,
+			tdata->ciphertext.len,
+			"GCM Ciphertext data not as expected");
+
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->auth_tag.data,
+			tdata->auth_tag.len,
+			"GCM Generated auth tag not as expected");
+
+	return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_1(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_1);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_2(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_2);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_3(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_3);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_4(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_4);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_5(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_5);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_6(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_6);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_7(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_7);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	int retval;
+
+	uint8_t *plaintext, *ciphertext;
+	uint16_t ciphertext_pad_len;
+
+	/* Create GCM session */
+	retval = create_gcm_session(ts_params->valid_devs[0],
+			RTE_CRYPTO_CIPHER_OP_DECRYPT,
+			tdata->key.data, tdata->key.len,
+			tdata->aad.len, tdata->auth_tag.len);
+	if (retval < 0)
+		return retval;
+
+
+	/* alloc mbuf and set payload */
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	ciphertext_pad_len = RTE_ALIGN_CEIL(tdata->ciphertext.len, 16);
+
+	ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+			ciphertext_pad_len);
+	memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+#endif
+	/* Create GCM operation */
+	retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT,
+			tdata->auth_tag.data, tdata->auth_tag.len,
+			tdata->iv.data, tdata->iv.len,
+			tdata->aad.data, tdata->aad.len,
+			tdata->ciphertext.len, ciphertext_pad_len);
+	if (retval < 0)
+		return retval;
+
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	/* Process crypto operation */
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	if (ut_params->op->sym->m_dst)
+		plaintext = rte_pktmbuf_mtod(ut_params->op->sym->m_dst,
+				uint8_t *);
+	else
+		plaintext = ciphertext;
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "plaintext:", plaintext, tdata->ciphertext.len);
+#endif
+	/* Validate obuf */
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			plaintext,
+			tdata->plaintext.data,
+			tdata->plaintext.len,
+			"GCM plaintext data not as expected");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status,
+			RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"GCM authentication failed");
+	return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_1(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_1);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_2(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_2);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_3(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_3);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_4(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_4);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_5(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_5);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_6(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_6);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_7(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_7);
+}
+
+static int
 test_stats(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -3088,6 +3499,47 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite  = {
 	}
 };
 
+static struct unit_test_suite cryptodev_aesni_gcm_testsuite  = {
+	.suite_name = "Crypto Device AESNI GCM Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		/** AES GCM Authenticated Encryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_7),
+
+		/** AES GCM Authenticated Decryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_7),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
 static struct unit_test_suite cryptodev_sw_snow3g_testsuite  = {
 	.suite_name = "Crypto Device SW Snow3G Unit Test Suite",
 	.setup = testsuite_setup,
@@ -3163,6 +3615,19 @@ static struct test_command cryptodev_aesni_mb_cmd = {
 };
 
 static int
+test_cryptodev_aesni_gcm(void)
+{
+	gbl_cryptodev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+
+	return unit_test_suite_runner(&cryptodev_aesni_gcm_testsuite);
+}
+
+static struct test_command cryptodev_aesni_gcm_cmd = {
+	.command = "cryptodev_aesni_gcm_autotest",
+	.callback = test_cryptodev_aesni_gcm,
+};
+
+static int
 test_cryptodev_sw_snow3g(void /*argv __rte_unused, int argc __rte_unused*/)
 {
 	gbl_cryptodev_type = RTE_CRYPTODEV_SNOW3G_PMD;
@@ -3177,4 +3642,5 @@ static struct test_command cryptodev_sw_snow3g_cmd = {
 
 REGISTER_TEST_COMMAND(cryptodev_qat_cmd);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_cmd);
+REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_cmd);
 REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_cmd);
diff --git a/app/test/test_cryptodev_gcm_test_vectors.h b/app/test/test_cryptodev_gcm_test_vectors.h
new file mode 100644
index 0000000..8ae22ba
--- /dev/null
+++ b/app/test/test_cryptodev_gcm_test_vectors.h
@@ -0,0 +1,423 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	 * Redistributions of source code must retain the above copyright
+ *	   notice, this list of conditions and the following disclaimer.
+ *	 * Redistributions in binary form must reproduce the above copyright
+ *	   notice, this list of conditions and the following disclaimer in
+ *	   the documentation and/or other materials provided with the
+ *	   distribution.
+ *	 * Neither the name of Intel Corporation nor the names of its
+ *	   contributors may be used to endorse or promote products derived
+ *	   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+
+struct gcm_test_data {
+	struct {
+		uint8_t data[64];
+		unsigned len;
+	} key;
+
+	struct {
+		uint8_t data[64] __rte_aligned(16);
+		unsigned len;
+	} iv;
+
+	struct {
+		uint8_t data[64];
+		unsigned len;
+	} aad;
+
+	struct {
+		uint8_t data[1024];
+		unsigned len;
+	} plaintext;
+
+	struct {
+		uint8_t data[1024];
+		unsigned len;
+	} ciphertext;
+
+	struct {
+		uint8_t data[16];
+		unsigned len;
+	} auth_tag;
+};
+
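+/*
+ * Note: these AES-128 vectors appear to match the widely used GCM reference
+ * test cases from the McGrew & Viega GCM specification.
+ */
+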
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_1 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0x00 },
+		.len = 0
+	},
+	.ciphertext = {
+		.data = {
+			0x00
+		},
+		.len = 0
+	},
+	.auth_tag = {
+		.data = {
+			0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61,
+			0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_2 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.ciphertext = {
+		.data = {
+			0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92,
+			0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78 },
+		.len = 16
+	},
+	.auth_tag = {
+		.data = {
+			0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd,
+			0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_3 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
+		.len = 64
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85
+		},
+		.len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
+			0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_4 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 8
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xA2, 0xA4, 0x35, 0x75, 0xDC, 0xB0, 0x57, 0x74,
+			0x07, 0x02, 0x30, 0xC2, 0xE7, 0x52, 0x02, 0x00
+		},
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_5 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef },
+		.len = 8
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xC5, 0x2D, 0xFB, 0x54, 0xAF, 0xBB, 0x07, 0xA1,
+			0x9A, 0xFF, 0xBE, 0xE0, 0x61, 0x4C, 0xE7, 0xA5
+		},
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_6 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0x74, 0xFC, 0xFA, 0x29, 0x3E, 0x60, 0xCC, 0x66,
+			0x09, 0xD6, 0xFD, 0x00, 0xC8, 0x86, 0xD5, 0x42
+		},
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_7 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+			0xfe, 0xed, 0xfa, 0xce
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xE9, 0xE4, 0xAB, 0x76, 0xB7, 0xFF, 0xEA, 0xDC,
+			0x69, 0x79, 0x38, 0xA2, 0x0D, 0xCA, 0xF5, 0x92
+		},
+		.len = 16
+	}
+};
+
+
+#endif /* TEST_CRYPTODEV_GCM_TEST_VECTORS_H_ */
diff --git a/config/common_base b/config/common_base
index 4202a89..8810809 100644
--- a/config/common_base
+++ b/config/common_base
@@ -337,6 +337,12 @@ CONFIG_RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS=8
 CONFIG_RTE_AESNI_MB_PMD_MAX_NB_SESSIONS=2048
 
 #
+# Compile PMD for AESNI GCM device
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM_DEBUG=n
+
+#
 # Compile PMD for SNOW 3G device
 #
 CONFIG_RTE_LIBRTE_PMD_SNOW3G=n
diff --git a/config/defconfig_i686-native-linuxapp-gcc b/config/defconfig_i686-native-linuxapp-gcc
index 290183a..c32859f 100644
--- a/config/defconfig_i686-native-linuxapp-gcc
+++ b/config/defconfig_i686-native-linuxapp-gcc
@@ -50,3 +50,13 @@ CONFIG_RTE_LIBRTE_KNI=n
 # Vectorized PMD is not supported on 32-bit
 #
 CONFIG_RTE_IXGBE_INC_VECTOR=n
+
+#
+# AES-NI multi-buffer PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
+
+#
+# AES-NI GCM PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
diff --git a/config/defconfig_i686-native-linuxapp-icc b/config/defconfig_i686-native-linuxapp-icc
index 96725f3..cde9d96 100644
--- a/config/defconfig_i686-native-linuxapp-icc
+++ b/config/defconfig_i686-native-linuxapp-icc
@@ -50,3 +50,13 @@ CONFIG_RTE_LIBRTE_KNI=n
 # Vectorized PMD is not supported on 32-bit
 #
 CONFIG_RTE_IXGBE_INC_VECTOR=n
+
+#
+# AES-NI multi-buffer PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
+
+#
+# AES-NI GCM PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index bf586d9..021ac0d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -31,6 +31,7 @@
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
new file mode 100644
index 0000000..aa2621b
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -0,0 +1,67 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
+
+# library name
+LIB = librte_pmd_aesni_gcm.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_gcm_version.map
+
+# external library include paths
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
new file mode 100644
index 0000000..c399068
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -0,0 +1,127 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AESNI_GCM_OPS_H_
+#define _AESNI_GCM_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <gcm_defines.h>
+#include <aux_funcs.h>
+
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+	RTE_AESNI_GCM_SSE,
+	RTE_AESNI_GCM_AVX,
+	RTE_AESNI_GCM_AVX2
+};
+
+typedef void (*aes_keyexp_128_enc_t)(void *key, void *enc_exp_keys);
+
+typedef void (*aesni_gcm_t)(gcm_data *my_ctx_data, u8 *out, const u8 *in,
+		u64 plaintext_len, u8 *iv, const u8 *aad, u64 aad_len,
+		u8 *auth_tag, u64 auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(gcm_data *my_ctx_data, u8 *hash_subkey);
+
+/** GCM library function pointer table */
+struct aesni_gcm_ops {
+	struct {
+		struct {
+			aes_keyexp_128_enc_t aes128_enc;
+			/**< AES128 enc key expansion */
+		} keyexp;
+		/**< Key expansion functions */
+	} aux; /**< Auxiliary functions */
+
+	struct {
+		aesni_gcm_t enc;	/**< GCM encode function pointer */
+		aesni_gcm_t dec;	/**< GCM decode function pointer */
+		aesni_gcm_precomp_t precomp;	/**< GCM pre-compute */
+	} gcm; /**< GCM functions */
+};
+
+
+static const struct aesni_gcm_ops gcm_ops[] = {
+	[RTE_AESNI_GCM_NOT_SUPPORTED] = {
+		.aux = {
+			.keyexp = {
+				NULL
+			}
+		},
+		.gcm = {
+			NULL
+		}
+	},
+	[RTE_AESNI_GCM_SSE] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_sse
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_sse,
+			aesni_gcm_dec_sse,
+			aesni_gcm_precomp_sse
+		}
+	},
+	[RTE_AESNI_GCM_AVX] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen2,
+			aesni_gcm_dec_avx_gen2,
+			aesni_gcm_precomp_avx_gen2
+		}
+	},
+	[RTE_AESNI_GCM_AVX2] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx2,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen4,
+			aesni_gcm_dec_avx_gen4,
+			aesni_gcm_precomp_avx_gen4
+		}
+	}
+};
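+
+/*
+ * Illustrative use of the dispatch table above (a sketch, not driver code):
+ * callers index gcm_ops[] by the detected vector mode and invoke the
+ * function pointers, e.g.
+ *
+ *	gcm_data gdata;
+ *	const struct aesni_gcm_ops *ops = &gcm_ops[RTE_AESNI_GCM_SSE];
+ *
+ *	(*ops->aux.keyexp.aes128_enc)(key, gdata.expanded_keys);
+ *	(*ops->gcm.precomp)(&gdata, hash_subkey);
+ */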
+
+
+#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
new file mode 100644
index 0000000..11074bb
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -0,0 +1,505 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <openssl/aes.h>
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/**
+ * Global static parameter used to create a unique name for each AES-NI GCM
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+	int ret;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_GCM_PMD,
+			unique_name_id++);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
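+/*
+ * Compute the GHASH subkey H = E_K(0^128): the caller passes a zero-filled
+ * hsubkey buffer (see aesni_gcm_set_session_parameters()), which is
+ * encrypted in place with the session's AES key using OpenSSL.
+ */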
+static int
+aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
+		uint8_t *aeskey, unsigned aeskey_length)
+{
+	uint8_t key[aeskey_length] __rte_aligned(16);
+	AES_KEY enc_key;
+
+	if (hsubkey_length % 16 != 0 || aeskey_length % 16 != 0)
+		return -EFAULT;
+
+	memcpy(key, aeskey, aeskey_length);
+
+	if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
+		return -EFAULT;
+
+	AES_encrypt(hsubkey, hsubkey, &enc_key);
+
+	return 0;
+}
+
+/** Determine the GCM operation (xform chain order) from the xform chain */
+static int
+aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+	/*
+	 * GCM only supports authenticated encryption or authenticated
+	 * decryption, all other options are invalid, so we must have exactly
+	 * 2 xform structs chained together
+	 */
+	if (xform->next == NULL || xform->next->next != NULL)
+		return -1;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+	}
+
+	return -1;
+}
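+
+/*
+ * Illustrative xform chain accepted by this PMD (a sketch, not driver code):
+ * for authenticated encryption the cipher xform comes first, with the auth
+ * xform chained behind it via ->next, e.g.
+ *
+ *	struct rte_crypto_sym_xform auth = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ *		.next = NULL,
+ *		.auth = { .algo = RTE_CRYPTO_AUTH_AES_GCM },
+ *	};
+ *	struct rte_crypto_sym_xform cipher = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.next = &auth,
+ *		.cipher = {
+ *			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
+ *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ *		},
+ *	};
+ */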
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+
+	uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	switch (aesni_gcm_get_mode(xform)) {
+	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	default:
+		GCM_LOG_ERR("Unsupported operation chain order parameter");
+		return -EINVAL;
+	}
+
+	/* We only support AES GCM */
+	if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM &&
+			auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+		return -EINVAL;
+
+	/* Select cipher direction */
+	if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
+			cipher_xform->cipher.op !=
+					RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
+				"(DECRYPT) specified are an invalid selection");
+		return -EINVAL;
+	} else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
+			cipher_xform->cipher.op !=
+					RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
+				"(ENCRYPT) specified are an invalid selection");
+		return -EINVAL;
+	}
+
+	/* Expand GCM AES128 key */
+	(*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
+			sess->gdata.expanded_keys);
+
+	/* Calculate hash sub key here */
+	aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
+			cipher_xform->cipher.key.data,
+			cipher_xform->cipher.key.length);
+
+	/* Calculate GCM pre-compute */
+	(*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+
+	return 0;
+}
+
+/** Get gcm session */
+static struct aesni_gcm_session *
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+{
+	struct aesni_gcm_session *sess = NULL;
+
+	if (op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+		if (unlikely(op->session->type != RTE_CRYPTODEV_AESNI_GCM_PMD))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)op->session->_private;
+	} else  {
+		void *_sess;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)
+			((struct rte_cryptodev_session *)_sess)->_private;
+
+		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+				sess, op->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			sess = NULL;
+		}
+	}
+	return sess;
+}
+
+/**
+ * Process a crypto operation, invoking the GCM library to perform the
+ * authenticated encryption or decryption described by the session.
+ *
+ * @param	qp		queue pair
+ * @param	op		symmetric crypto operation
+ * @param	session		GCM session
+ *
+ * @return
+ * - 0 on success
+ * - -1 on failure (invalid operation parameters)
+ */
+static int
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+		struct aesni_gcm_session *session)
+{
+	uint8_t *src, *dst;
+	struct rte_mbuf *m = op->m_src;
+
+	src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
+	dst = op->m_dst ?
+			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
+					op->cipher.data.offset) :
+			rte_pktmbuf_mtod_offset(m, uint8_t *,
+					op->cipher.data.offset);
+
+	/* sanity checks */
+	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 0) {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
+			op->auth.aad.length != 0) {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	if (op->auth.digest.length != 16 &&
+			op->auth.digest.length != 12 &&
+			op->auth.digest.length != 8 &&
+			op->auth.digest.length != 0) {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
+
+		(*qp->ops->gcm.enc)(&session->gdata, dst, src,
+				(uint64_t)op->cipher.data.length,
+				op->cipher.iv.data,
+				op->auth.aad.data,
+				(uint64_t)op->auth.aad.length,
+				op->auth.digest.data,
+				(uint64_t)op->auth.digest.length);
+	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+				op->auth.digest.length);
+
+		if (!auth_tag) {
+			GCM_LOG_ERR("iv");
+			return -1;
+		}
+
+		(*qp->ops->gcm.dec)(&session->gdata, dst, src,
+				(uint64_t)op->cipher.data.length,
+				op->cipher.iv.data,
+				op->auth.aad.data,
+				(uint64_t)op->auth.aad.length,
+				auth_tag,
+				(uint64_t)op->auth.digest.length);
+	} else {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Post-process a completed crypto operation: set its status and, for
+ * authenticated decryption, verify the digest computed over the output
+ * against the digest supplied with the operation, then trim the area used
+ * for the computed digest from the mbuf.
+ *
+ * @param op	completed crypto operation
+ */
+static void
+post_process_gcm_crypto_op(struct rte_crypto_op *op)
+{
+	struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+	struct aesni_gcm_session *session =
+		(struct aesni_gcm_session *)op->sym->session->_private;
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Verify digest if required */
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+
+		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
+				m->data_len - op->sym->auth.digest.length);
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		rte_hexdump(stdout, "auth tag (orig):",
+				op->sym->auth.digest.data,
+				op->sym->auth.digest.length);
+		rte_hexdump(stdout, "auth tag (calc):",
+				tag, op->sym->auth.digest.length);
+#endif
+
+		if (memcmp(tag, op->sym->auth.digest.data,
+				op->sym->auth.digest.length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+		/* trim area used for digest from mbuf */
+		rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+	}
+}
+
+/**
+ * Process a completed GCM request
+ *
+ * @param qp	Queue pair the operation was processed on
+ * @param op	Completed crypto operation
+ */
+static void
+handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
+		struct rte_crypto_op *op)
+{
+	post_process_gcm_crypto_op(op);
+
+	/* Free session if a session-less crypto op */
+	if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
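+/*
+ * Note: this PMD processes each operation synchronously at enqueue time;
+ * completed operations are placed on the processed ring, which the
+ * dequeue burst below simply drains.
+ */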
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_session *sess;
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	int i, retval = 0;
+
+	for (i = 0; i < nb_ops; i++) {
+
+		sess = aesni_gcm_get_session(qp, ops[i]->sym);
+		if (unlikely(sess == NULL)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+		if (retval < 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		handle_completed_gcm_crypto_op(qp, ops[i]);
+
+		qp->qp_stats.enqueued_count++;
+	}
+	return i;
+}
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	unsigned nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+			(void **)ops, nb_ops);
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
+
+static int aesni_gcm_uninit(const char *name);
+
+static int
+aesni_gcm_create(const char *name,
+		struct rte_crypto_vdev_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct aesni_gcm_private *internals;
+	enum aesni_gcm_vector_mode vector_mode;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+		GCM_LOG_ERR("AES instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_GCM_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_GCM_AVX;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+		vector_mode = RTE_AESNI_GCM_SSE;
+	else {
+		GCM_LOG_ERR("Vector instructions are not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* create a unique device name */
+	if (create_unique_device_name(crypto_dev_name,
+			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+		GCM_LOG_ERR("failed to create unique cryptodev name");
+		return -EINVAL;
+	}
+
+	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+			sizeof(struct aesni_gcm_private), init_params->socket_id);
+	if (dev == NULL) {
+		GCM_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+	dev->dev_ops = rte_aesni_gcm_pmd_ops;
+
+	/* register rx/tx burst functions for data path */
+	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
+
+	/* Set vector instructions mode supported */
+	internals = dev->data->dev_private;
+
+	internals->vector_mode = vector_mode;
+
+	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+
+	return 0;
+
+init_error:
+	GCM_LOG_ERR("driver %s: create failed", name);
+
+	aesni_gcm_uninit(crypto_dev_name);
+	return -EFAULT;
+}
+
+static int
+aesni_gcm_init(const char *name, const char *input_args)
+{
+	struct rte_crypto_vdev_init_params init_params = {
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+		rte_socket_id()
+	};
+
+	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	return aesni_gcm_create(name, &init_params);
+}
+
+static int
+aesni_gcm_uninit(const char *name)
+{
+	if (name == NULL)
+		return -EINVAL;
+
+	GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
+			name, rte_socket_id());
+
+	return 0;
+}
+
+static struct rte_driver aesni_gcm_pmd_drv = {
+	.name = CRYPTODEV_NAME_AESNI_GCM_PMD,
+	.type = PMD_VDEV,
+	.init = aesni_gcm_init,
+	.uninit = aesni_gcm_uninit
+};
+
+PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv);
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
new file mode 100644
index 0000000..f865e0d
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -0,0 +1,292 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/** Configure device */
+static int
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+
+/** Get device info */
+static void
+aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->dev_type = dev->dev_type;
+
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
+
+/** Release queue pair */
+static int
+aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		rte_free(dev->data->queue_pairs[qp_id]);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair, based on the device id and qp id */
+static int
+aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct aesni_gcm_qp *qp)
+{
+	unsigned n = snprintf(qp->name, sizeof(qp->name),
+			"aesni_gcm_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
+		unsigned ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->prod.size >= ring_size) {
+			GCM_LOG_INFO("Reusing existing ring %s for processed"
+					" packets", qp->name);
+			return r;
+		}
+
+		GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+				" packets", qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id)
+{
+	struct aesni_gcm_qp *qp = NULL;
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		aesni_gcm_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return (-ENOMEM);
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->ops = &gcm_ops[internals->vector_mode];
+
+	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	qp->sess_mp = dev->data->session_pool;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp)
+		rte_free(qp);
+
+	return -1;
+}
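+
+/*
+ * Illustrative usage sketch (not part of this driver): an application
+ * reaches the setup function above through the cryptodev API, e.g.:
+ *
+ *	struct rte_cryptodev_qp_conf conf = { .nb_descriptors = 2048 };
+ *
+ *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &conf, socket_id) < 0)
+ *		rte_exit(EXIT_FAILURE, "queue pair setup failed");
+ */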
+
+/** Start queue pair */
+static int
+aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned
+aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct aesni_gcm_session);
+}
+
+/** Configure an aesni gcm session from a crypto xform chain */
+static void *
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,	void *sess)
+{
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	if (unlikely(sess == NULL)) {
+		GCM_LOG_ERR("invalid session struct");
+		return NULL;
+	}
+
+	if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
+			sess, xform) != 0) {
+		GCM_LOG_ERR("failed configure session parameters");
+		return NULL;
+	}
+
+	return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+	if (sess)
+		memset(sess, 0, sizeof(struct aesni_gcm_session));
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+		.dev_configure		= aesni_gcm_pmd_config,
+		.dev_start		= aesni_gcm_pmd_start,
+		.dev_stop		= aesni_gcm_pmd_stop,
+		.dev_close		= aesni_gcm_pmd_close,
+
+		.stats_get		= aesni_gcm_pmd_stats_get,
+		.stats_reset		= aesni_gcm_pmd_stats_reset,
+
+		.dev_infos_get		= aesni_gcm_pmd_info_get,
+
+		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
+		.queue_pair_release	= aesni_gcm_pmd_qp_release,
+		.queue_pair_start	= aesni_gcm_pmd_qp_start,
+		.queue_pair_stop	= aesni_gcm_pmd_qp_stop,
+		.queue_pair_count	= aesni_gcm_pmd_qp_count,
+
+		.session_get_size	= aesni_gcm_pmd_session_get_size,
+		.session_configure	= aesni_gcm_pmd_session_configure,
+		.session_clear		= aesni_gcm_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
new file mode 100644
index 0000000..a42f941
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -0,0 +1,120 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+
+#include "aesni_gcm_ops.h"
+
+#define GCM_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+#define GCM_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#define GCM_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+#else
+#define GCM_LOG_INFO(fmt, args...)
+#define GCM_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each virtual AESNI GCM device */
+struct aesni_gcm_private {
+	enum aesni_gcm_vector_mode vector_mode;
+	/**< Vector mode */
+	unsigned max_nb_queue_pairs;
+	/**< Max number of queue pairs supported by device */
+	unsigned max_nb_sessions;
+	/**< Max number of sessions supported by device */
+};
+
+struct aesni_gcm_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	const struct aesni_gcm_ops *ops;
+	/**< Architecture dependent function pointer table of the gcm APIs */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing process packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+enum aesni_gcm_operation {
+	AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
+	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+	enum aesni_gcm_operation op;
+	/**< GCM operation type */
+	struct gcm_data gdata __rte_cache_aligned;
+	/**< GCM parameters */
+};
+
+
+/**
+ * Setup GCM session parameters
+ * @param	ops	gcm ops function pointer table
+ * @param	sess	aesni gcm session structure
+ * @param	xform	crypto transform chain
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns error code < 0
+ */
+extern int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_sym_xform *xform);
+
+
+/** Device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+
+
+#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
new file mode 100644
index 0000000..dc4d417
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+	local: *;
+};
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index f279c92..c6c62e7 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -56,6 +56,8 @@ extern "C" {
 /**< Null crypto PMD device name */
 #define CRYPTODEV_NAME_AESNI_MB_PMD	("cryptodev_aesni_mb_pmd")
 /**< AES-NI Multi buffer PMD device name */
+#define CRYPTODEV_NAME_AESNI_GCM_PMD	("cryptodev_aesni_gcm_pmd")
+/**< AES-NI GCM PMD device name */
 #define CRYPTODEV_NAME_QAT_SYM_PMD	("cryptodev_qat_sym_pmd")
 /**< Intel QAT Symmetric Crypto PMD device name */
 #define CRYPTODEV_NAME_SNOW3G_PMD	("cryptodev_snow3g_pmd")
@@ -64,6 +66,7 @@ extern "C" {
 /** Crypto device type */
 enum rte_cryptodev_type {
 	RTE_CRYPTODEV_NULL_PMD = 1,	/**< Null crypto PMD */
+	RTE_CRYPTODEV_AESNI_GCM_PMD,	/**< AES-NI GCM PMD */
 	RTE_CRYPTODEV_AESNI_MB_PMD,	/**< AES-NI multi buffer PMD */
 	RTE_CRYPTODEV_QAT_SYM_PMD,	/**< QAT PMD Symmetric Crypto */
 	RTE_CRYPTODEV_SNOW3G_PMD,	/**< SNOW 3G PMD */
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 7e46370..0725f12 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -102,8 +102,13 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -libverbs
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2)   += -lsze2
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT)    += -lxenstore
 _LDLIBS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD)      += -lgxio
-# QAT PMD has a dependency on libcrypto (from openssl) for calculating HMAC precomputes
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lcrypto
+# QAT / AESNI GCM PMDs are dependent on libcrypto (from openssl):
+# QAT for calculating HMAC precomputes and AESNI GCM for deriving
+# the GCM hash subkey
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT),y)
+_LDLIBS-y                                   += -lcrypto
+else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
+_LDLIBS-y                                   += -lcrypto
+endif
 endif # !CONFIG_RTE_BUILD_SHARED_LIBS
 
 _LDLIBS-y += --start-group
@@ -146,9 +151,15 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET)  += -lrte_pmd_af_packet
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL)       += -lrte_pmd_null
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lrte_pmd_qat
 
-# AESNI MULTI BUFFER is dependent on the IPSec_MB library
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -lrte_pmd_aesni_mb
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM)   += -lrte_pmd_aesni_gcm
+
+# AESNI MULTI BUFFER / GCM PMDs are dependent on the IPSec_MB library
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_MB),y)
+_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
+_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+endif
 
 # SNOW3G PMD is dependent on the LIBSSO library
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G)     += -lrte_pmd_snow3g
-- 
2.5.0

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH v2] aesni_gcm: PMD to support AES_GCM crypto operations
  2016-03-08 10:09 ` [dpdk-dev] [PATCH v2] " Pablo de Lara
@ 2016-03-08 11:22   ` De Lara Guarch, Pablo
  2016-03-08 11:26   ` [dpdk-dev] [PATCH v3] " Pablo de Lara
  1 sibling, 0 replies; 9+ messages in thread
From: De Lara Guarch, Pablo @ 2016-03-08 11:22 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Pablo de Lara
> Sent: Tuesday, March 08, 2016 10:09 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2] aesni_gcm: PMD to support AES_GCM crypto
> operations
> 
> From: Declan Doherty <declan.doherty@intel.com>
> 
> This patch provides the implementation of an AES-NI accelerated crypto PMD
> which is dependent on Intel's multi-buffer library, see the white paper
> "Fast Multi-buffer IPsec Implementations on Intel®  Architecture  Processors"
> 
> This PMD supports AES_GCM authenticated encryption and authenticated
> decryption using
> 128-bit AES keys
> 
> The patch also contains the related unit tests functions for the implemented
> functionality
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> ---
> 
> This patch depends on "pmd/snow3g: add new SNOW 3G SW PMD" patch
> (http://dpdk.org/dev/patchwork/patch/11151/).
> 
> Changes in v2:
> 
> - Rebased against crypto API changes
> - Removed static config options and allow user to provide them
>   as virtual device parameters
> - Changed DPDK version references from 2.3 to 16.04
> - Added missing library dependency
> 
>  MAINTAINERS                                        |   4 +
>  app/test/test_cryptodev.c                          | 466 +++++++++++++++++++
>  app/test/test_cryptodev_gcm_test_vectors.h         | 423 +++++++++++++++++
>  config/common_base                                 |   6 +
>  config/defconfig_i686-native-linuxapp-gcc          |  10 +
>  config/defconfig_i686-native-linuxapp-icc          |  10 +
>  drivers/crypto/Makefile                            |   1 +
>  drivers/crypto/aesni_gcm/Makefile                  |  67 +++
>  drivers/crypto/aesni_gcm/aesni_gcm_ops.h           | 127 ++++++
>  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c           | 505 +++++++++++++++++++++
>  drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c       | 292 ++++++++++++
>  drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h   | 120 +++++
>  .../crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map |   3 +
>  lib/librte_cryptodev/rte_cryptodev.h               |   3 +
>  mk/rte.app.mk                                      |  19 +-
>  15 files changed, 2052 insertions(+), 4 deletions(-)
>  create mode 100644 app/test/test_cryptodev_gcm_test_vectors.h
>  create mode 100644 drivers/crypto/aesni_gcm/Makefile
>  create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h
>  create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
>  create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
>  create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
>  create mode 100644 drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
>

NACK, some error messages are wrong.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-dev] [PATCH v3] aesni_gcm: PMD to support AES_GCM crypto operations
  2016-03-08 10:09 ` [dpdk-dev] [PATCH v2] " Pablo de Lara
  2016-03-08 11:22   ` De Lara Guarch, Pablo
@ 2016-03-08 11:26   ` Pablo de Lara
  2016-03-10 16:41     ` [dpdk-dev] [PATCH v4] " Pablo de Lara
  1 sibling, 1 reply; 9+ messages in thread
From: Pablo de Lara @ 2016-03-08 11:26 UTC (permalink / raw)
  To: dev

From: Declan Doherty <declan.doherty@intel.com>

This patch provides the implementation of an AES-NI accelerated crypto PMD
which is dependent on Intel's multi-buffer library, see the white paper
"Fast Multi-buffer IPsec Implementations on Intel®  Architecture  Processors"

This PMD supports AES_GCM authenticated encryption and authenticated decryption using
128-bit AES keys

The patch also contains the related unit test functions for the implemented functionality

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---

This patch depends on "pmd/snow3g: add new SNOW 3G SW PMD" patch
(http://dpdk.org/dev/patchwork/patch/11151/).

Changes in v3:
- Fixed incorrect error messages

Changes in v2:

- Rebased against crypto API changes
- Removed static config options and allow user to provide them
  as virtual device parameters
- Changed DPDK version references from 2.3 to 16.04 
- Added missing library dependency
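
A usage sketch for trying the PMD (the binary path and vdev parameter
names are assumed from the generic rte_cryptodev_parse_vdev_init_params()
helper and may differ):

  ./app/test --vdev="cryptodev_aesni_gcm_pmd,max_nb_queue_pairs=4,max_nb_sessions=128,socket_id=0"
  RTE>>cryptodev_aesni_gcm_autotest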

 MAINTAINERS                                        |   4 +
 app/test/test_cryptodev.c                          | 466 +++++++++++++++++++
 app/test/test_cryptodev_gcm_test_vectors.h         | 423 +++++++++++++++++
 config/common_base                                 |   6 +
 config/defconfig_i686-native-linuxapp-gcc          |  10 +
 config/defconfig_i686-native-linuxapp-icc          |  10 +
 drivers/crypto/Makefile                            |   1 +
 drivers/crypto/aesni_gcm/Makefile                  |  67 +++
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h           | 127 ++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c           | 505 +++++++++++++++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c       | 292 ++++++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h   | 120 +++++
 .../crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map |   3 +
 lib/librte_cryptodev/rte_cryptodev.h               |   3 +
 mk/rte.app.mk                                      |  19 +-
 15 files changed, 2052 insertions(+), 4 deletions(-)
 create mode 100644 app/test/test_cryptodev_gcm_test_vectors.h
 create mode 100644 drivers/crypto/aesni_gcm/Makefile
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
 create mode 100644 drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index c028e67..076757d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -348,6 +348,10 @@ Null PMD
 M: Tetsuya Mukawa <mukawa@igel.co.jp>
 F: drivers/net/null/
 
+Intel AES-NI GCM PMD
+M: Declan Doherty <declan.doherty@intel.com>
+F: drivers/crypto/aesni_gcm/
+
 Intel AES-NI Multi-Buffer
 M: Declan Doherty <declan.doherty@intel.com>
 F: drivers/crypto/aesni_mb/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 1f822f0..d7e80c4 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -44,6 +44,8 @@
 #include "test_cryptodev.h"
 #include "test_cryptodev_snow3g_test_vectors.h"
 #include "test_cryptodev_snow3g_hash_test_vectors.h"
+#include "test_cryptodev_gcm_test_vectors.h"
+
 static enum rte_cryptodev_type gbl_cryptodev_type;
 
 struct crypto_testsuite_params {
@@ -195,6 +197,21 @@ testsuite_setup(void)
 		}
 	}
 
+	/* Create 2 AESNI GCM devices if required */
+	if (gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_GCM_PMD) {
+		nb_devs = rte_cryptodev_count_devtype(
+				RTE_CRYPTODEV_AESNI_GCM_PMD);
+		if (nb_devs < 2) {
+			for (i = nb_devs; i < 2; i++) {
+				TEST_ASSERT_SUCCESS(rte_eal_vdev_init(
+					CRYPTODEV_NAME_AESNI_GCM_PMD, NULL),
+					"Failed to create instance %u of"
+					" pmd : %s",
+					i, CRYPTODEV_NAME_AESNI_GCM_PMD);
+			}
+		}
+	}
+
 	/* Create 2 Snow3G devices if required */
 	if (gbl_cryptodev_type == RTE_CRYPTODEV_SNOW3G_PMD) {
 		nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_SNOW3G_PMD);
@@ -2761,6 +2778,400 @@ test_snow3g_encrypted_authentication_test_case_1(void)
 /* ***** AES-GCM Tests ***** */
 
 static int
+create_gcm_session(uint8_t dev_id, enum rte_crypto_cipher_operation op,
+		const uint8_t *key, const uint8_t key_len,
+		const uint8_t aad_len, const uint8_t auth_len)
+{
+	uint8_t cipher_key[key_len];
+
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	memcpy(cipher_key, key, key_len);
+
+	/* Setup Cipher Parameters */
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	ut_params->cipher_xform.next = NULL;
+
+	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+	ut_params->cipher_xform.cipher.op = op;
+	ut_params->cipher_xform.cipher.key.data = cipher_key;
+	ut_params->cipher_xform.cipher.key.length = key_len;
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "key:", key, key_len);
+#endif
+	/* Setup Authentication Parameters */
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	ut_params->auth_xform.next = NULL;
+
+	ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
+
+	ut_params->auth_xform.auth.digest_length = auth_len;
+	ut_params->auth_xform.auth.add_auth_data_length = aad_len;
+	ut_params->auth_xform.auth.key.length = 0;
+	ut_params->auth_xform.auth.key.data = NULL;
+
+	if (op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		ut_params->cipher_xform.next = &ut_params->auth_xform;
+
+		/* Create Crypto session*/
+		ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
+				&ut_params->cipher_xform);
+	} else {
+		/* Create Crypto session */
+		ut_params->auth_xform.next = &ut_params->cipher_xform;
+		ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
+				&ut_params->auth_xform);
+	}
+
+	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+
+	return 0;
+}
+
+static int
+create_gcm_operation(enum rte_crypto_cipher_operation op,
+		const uint8_t *auth_tag, const unsigned auth_tag_len,
+		const uint8_t *iv, const unsigned iv_len,
+		const uint8_t *aad, const unsigned aad_len,
+		const unsigned data_len, unsigned data_pad_len)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	unsigned iv_pad_len = 0, aad_buffer_len;
+
+	/* Generate Crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
+
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+	sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+			ut_params->ibuf, auth_tag_len);
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+			ut_params->ibuf, data_pad_len);
+	sym_op->auth.digest.length = auth_tag_len;
+
+	if (op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		rte_memcpy(sym_op->auth.digest.data, auth_tag, auth_tag_len);
+#ifdef RTE_APP_TEST_DEBUG
+		rte_hexdump(stdout, "digest:",
+				sym_op->auth.digest.data,
+				sym_op->auth.digest.length);
+#endif
+	}
+
+	/* iv */
+	iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+			ut_params->ibuf, iv_pad_len);
+	TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
+
+	memset(sym_op->cipher.iv.data, 0, iv_pad_len);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = iv_pad_len;
+
+	rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
+
+	/*
+	 * Compute initial counter block Y0: with the standard 96-bit IV,
+	 * Y0 = IV || 0^31 || 1, so the final byte of the padded IV is set to 1
+	 */
+	if (iv_len != 16)
+		sym_op->cipher.iv.data[15] = 1;
+
+	/*
+	 * Always allocate the aad up to the block size.
+	 * The cryptodev API calls out -
+	 *  - the array must be big enough to hold the AAD, plus any
+	 *   space to round this up to the nearest multiple of the
+	 *   block size (16 bytes).
+	 */
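+	/* e.g. an aad_len of 12 is rounded up to a 16 byte buffer */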
+	aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
+
+	sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
+			ut_params->ibuf, aad_buffer_len);
+	TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
+			"no room to prepend aad");
+	sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
+			ut_params->ibuf);
+	sym_op->auth.aad.length = aad_len;
+
+	memset(sym_op->auth.aad.data, 0, aad_buffer_len);
+	rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "iv:", sym_op->cipher.iv.data, iv_pad_len);
+	rte_hexdump(stdout, "aad:",
+			sym_op->auth.aad.data, aad_len);
+#endif
+	sym_op->cipher.data.length = data_len;
+	sym_op->cipher.data.offset = aad_buffer_len + iv_pad_len;
+
+	sym_op->auth.data.offset = aad_buffer_len + iv_pad_len;
+	sym_op->auth.data.length = data_len;
+
+	return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	int retval;
+
+	uint8_t *plaintext, *ciphertext, *auth_tag;
+	uint16_t plaintext_pad_len;
+
+	/* Create GCM session */
+	retval = create_gcm_session(ts_params->valid_devs[0],
+			RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			tdata->key.data, tdata->key.len,
+			tdata->aad.len, tdata->auth_tag.len);
+	if (retval < 0)
+		return retval;
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+	/* clear mbuf payload */
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	/*
+	 * Append data which is padded to a multiple
+	 * of the algorithms block size
+	 */
+	plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+			plaintext_pad_len);
+	memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
+#endif
+	/* Create GCM operation */
+	retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			tdata->auth_tag.data, tdata->auth_tag.len,
+			tdata->iv.data, tdata->iv.len,
+			tdata->aad.data, tdata->aad.len,
+			tdata->plaintext.len, plaintext_pad_len);
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	/* Process crypto operation */
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	if (ut_params->op->sym->m_dst) {
+		ciphertext = rte_pktmbuf_mtod(ut_params->op->sym->m_dst,
+				uint8_t *);
+		auth_tag = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_dst,
+				uint8_t *, plaintext_pad_len);
+	} else {
+		ciphertext = plaintext;
+		auth_tag = plaintext + plaintext_pad_len;
+	}
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+	rte_hexdump(stdout, "auth tag:", auth_tag, tdata->auth_tag.len);
+#endif
+	/* Validate obuf */
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			ciphertext,
+			tdata->ciphertext.data,
+			tdata->ciphertext.len,
+			"GCM Ciphertext data not as expected");
+
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->auth_tag.data,
+			tdata->auth_tag.len,
+			"GCM Generated auth tag not as expected");
+
+	return 0;
+
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_1(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_1);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_2(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_2);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_3(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_3);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_4(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_4);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_5(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_5);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_6(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_6);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_7(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_7);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	int retval;
+
+	uint8_t *plaintext, *ciphertext;
+	uint16_t ciphertext_pad_len;
+
+	/* Create GCM session */
+	retval = create_gcm_session(ts_params->valid_devs[0],
+			RTE_CRYPTO_CIPHER_OP_DECRYPT,
+			tdata->key.data, tdata->key.len,
+			tdata->aad.len, tdata->auth_tag.len);
+	if (retval < 0)
+		return retval;
+
+	/* alloc mbuf and set payload */
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	ciphertext_pad_len = RTE_ALIGN_CEIL(tdata->ciphertext.len, 16);
+
+	ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+			ciphertext_pad_len);
+	memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+#endif
+	/* Create GCM operation */
+	retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT,
+			tdata->auth_tag.data, tdata->auth_tag.len,
+			tdata->iv.data, tdata->iv.len,
+			tdata->aad.data, tdata->aad.len,
+			tdata->ciphertext.len, ciphertext_pad_len);
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	/* Process crypto operation */
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	if (ut_params->op->sym->m_dst)
+		plaintext = rte_pktmbuf_mtod(ut_params->op->sym->m_dst,
+				uint8_t *);
+	else
+		plaintext = ciphertext;
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "plaintext:", plaintext, tdata->ciphertext.len);
+#endif
+	/* Validate obuf */
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			plaintext,
+			tdata->plaintext.data,
+			tdata->plaintext.len,
+			"GCM plaintext data not as expected");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status,
+			RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"GCM authentication failed");
+	return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_1(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_1);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_2(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_2);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_3(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_3);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_4(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_4);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_5(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_5);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_6(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_6);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_7(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_7);
+}
+
+static int
 test_stats(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -3088,6 +3499,47 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite  = {
 	}
 };
 
+static struct unit_test_suite cryptodev_aesni_gcm_testsuite  = {
+	.suite_name = "Crypto Device AESNI GCM Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		/** AES GCM Authenticated Encryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_7),
+
+		/** AES GCM Authenticated Decryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_7),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
 static struct unit_test_suite cryptodev_sw_snow3g_testsuite  = {
 	.suite_name = "Crypto Device SW Snow3G Unit Test Suite",
 	.setup = testsuite_setup,
@@ -3163,6 +3615,19 @@ static struct test_command cryptodev_aesni_mb_cmd = {
 };
 
 static int
+test_cryptodev_aesni_gcm(void)
+{
+	gbl_cryptodev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+
+	return unit_test_suite_runner(&cryptodev_aesni_gcm_testsuite);
+}
+
+static struct test_command cryptodev_aesni_gcm_cmd = {
+	.command = "cryptodev_aesni_gcm_autotest",
+	.callback = test_cryptodev_aesni_gcm,
+};
+
+static int
 test_cryptodev_sw_snow3g(void /*argv __rte_unused, int argc __rte_unused*/)
 {
 	gbl_cryptodev_type = RTE_CRYPTODEV_SNOW3G_PMD;
@@ -3177,4 +3642,5 @@ static struct test_command cryptodev_sw_snow3g_cmd = {
 
 REGISTER_TEST_COMMAND(cryptodev_qat_cmd);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_cmd);
+REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_cmd);
 REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_cmd);
diff --git a/app/test/test_cryptodev_gcm_test_vectors.h b/app/test/test_cryptodev_gcm_test_vectors.h
new file mode 100644
index 0000000..8ae22ba
--- /dev/null
+++ b/app/test/test_cryptodev_gcm_test_vectors.h
@@ -0,0 +1,423 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	 * Redistributions of source code must retain the above copyright
+ *	   notice, this list of conditions and the following disclaimer.
+ *	 * Redistributions in binary form must reproduce the above copyright
+ *	   notice, this list of conditions and the following disclaimer in
+ *	   the documentation and/or other materials provided with the
+ *	   distribution.
+ *	 * Neither the name of Intel Corporation nor the names of its
+ *	   contributors may be used to endorse or promote products derived
+ *	   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+
+struct gcm_test_data {
+	struct {
+		uint8_t data[64];
+		unsigned len;
+	} key;
+
+	struct {
+		uint8_t data[64] __rte_aligned(16);
+		unsigned len;
+	} iv;
+
+	struct {
+		uint8_t data[64];
+		unsigned len;
+	} aad;
+
+	struct {
+		uint8_t data[1024];
+		unsigned len;
+	} plaintext;
+
+	struct {
+		uint8_t data[1024];
+		unsigned len;
+	} ciphertext;
+
+	struct {
+		uint8_t data[16];
+		unsigned len;
+	} auth_tag;
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_1 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0x00 },
+		.len = 0
+	},
+	.ciphertext = {
+		.data = {
+			0x00
+		},
+		.len = 0
+	},
+	.auth_tag = {
+		.data = {
+			0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61,
+			0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_2 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.ciphertext = {
+		.data = {
+			0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92,
+			0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78 },
+		.len = 16
+	},
+	.auth_tag = {
+		.data = {
+			0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd,
+			0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_3 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
+		.len = 64
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85
+		},
+		.len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
+			0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_4 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 8
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xA2, 0xA4, 0x35, 0x75, 0xDC, 0xB0, 0x57, 0x74,
+			0x07, 0x02, 0x30, 0xC2, 0xE7, 0x52, 0x02, 0x00
+		},
+		.len = 16
+	}
+
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_5 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef },
+		.len = 8
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xC5, 0x2D, 0xFB, 0x54, 0xAF, 0xBB, 0x07, 0xA1,
+			0x9A, 0xFF, 0xBE, 0xE0, 0x61, 0x4C, 0xE7, 0xA5
+		},
+		.len = 16
+	}
+
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_6 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0x74, 0xFC, 0xFA, 0x29, 0x3E, 0x60, 0xCC, 0x66,
+			0x09, 0xD6, 0xFD, 0x00, 0xC8, 0x86, 0xD5, 0x42
+		},
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_7 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+			0xfe, 0xed, 0xfa, 0xce
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xE9, 0xE4, 0xAB, 0x76, 0xB7, 0xFF, 0xEA, 0xDC,
+			0x69, 0x79, 0x38, 0xA2, 0x0D, 0xCA, 0xF5, 0x92
+		},
+		.len = 16
+	}
+};
+
+
+#endif /* TEST_CRYPTODEV_GCM_TEST_VECTORS_H_ */
diff --git a/config/common_base b/config/common_base
index 4202a89..8810809 100644
--- a/config/common_base
+++ b/config/common_base
@@ -337,6 +337,12 @@ CONFIG_RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS=8
 CONFIG_RTE_AESNI_MB_PMD_MAX_NB_SESSIONS=2048
 
 #
+# Compile PMD for AESNI GCM device
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM_DEBUG=n
+
+#
 # Compile PMD for SNOW 3G device
 #
 CONFIG_RTE_LIBRTE_PMD_SNOW3G=n
diff --git a/config/defconfig_i686-native-linuxapp-gcc b/config/defconfig_i686-native-linuxapp-gcc
index 290183a..c32859f 100644
--- a/config/defconfig_i686-native-linuxapp-gcc
+++ b/config/defconfig_i686-native-linuxapp-gcc
@@ -50,3 +50,13 @@ CONFIG_RTE_LIBRTE_KNI=n
 # Vectorized PMD is not supported on 32-bit
 #
 CONFIG_RTE_IXGBE_INC_VECTOR=n
+
+#
+# AES-NI multi-buffer PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
+
+#
+# AES-NI GCM PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
diff --git a/config/defconfig_i686-native-linuxapp-icc b/config/defconfig_i686-native-linuxapp-icc
index 96725f3..cde9d96 100644
--- a/config/defconfig_i686-native-linuxapp-icc
+++ b/config/defconfig_i686-native-linuxapp-icc
@@ -50,3 +50,13 @@ CONFIG_RTE_LIBRTE_KNI=n
 # Vectorized PMD is not supported on 32-bit
 #
 CONFIG_RTE_IXGBE_INC_VECTOR=n
+
+#
+# AES-NI multi-buffer PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
+
+#
+# AES-NI GCM PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index bf586d9..021ac0d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -31,6 +31,7 @@
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
new file mode 100644
index 0000000..aa2621b
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -0,0 +1,67 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
+
+# library name
+LIB = librte_pmd_aesni_gcm.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_gcm_version.map
+
+# external library include paths
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
new file mode 100644
index 0000000..c399068
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -0,0 +1,127 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AESNI_GCM_OPS_H_
+#define _AESNI_GCM_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <gcm_defines.h>
+#include <aux_funcs.h>
+
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+	RTE_AESNI_GCM_SSE,
+	RTE_AESNI_GCM_AVX,
+	RTE_AESNI_GCM_AVX2
+};
+
+typedef void (*aes_keyexp_128_enc_t)(void *key, void *enc_exp_keys);
+
+typedef void (*aesni_gcm_t)(gcm_data *my_ctx_data, u8 *out, const u8 *in,
+		u64 plaintext_len, u8 *iv, const u8 *aad, u64 aad_len,
+		u8 *auth_tag, u64 auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(gcm_data *my_ctx_data, u8 *hash_subkey);
+
+/** GCM library function pointer table */
+struct aesni_gcm_ops {
+	struct {
+		struct {
+			aes_keyexp_128_enc_t aes128_enc;
+			/**< AES128 enc key expansion */
+		} keyexp;
+		/**< Key expansion functions */
+	} aux; /**< Auxiliary functions */
+
+	struct {
+		aesni_gcm_t enc;	/**< GCM encode function pointer */
+		aesni_gcm_t dec;	/**< GCM decode function pointer */
+		aesni_gcm_precomp_t precomp;	/**< GCM pre-compute */
+	} gcm; /**< GCM functions */
+};
+
+
+static const struct aesni_gcm_ops gcm_ops[] = {
+	[RTE_AESNI_GCM_NOT_SUPPORTED] = {
+		.aux = {
+			.keyexp = {
+				NULL
+			}
+		},
+		.gcm = {
+			NULL
+		}
+	},
+	[RTE_AESNI_GCM_SSE] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_sse
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_sse,
+			aesni_gcm_dec_sse,
+			aesni_gcm_precomp_sse
+		}
+	},
+	[RTE_AESNI_GCM_AVX] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen2,
+			aesni_gcm_dec_avx_gen2,
+			aesni_gcm_precomp_avx_gen2
+		}
+	},
+	[RTE_AESNI_GCM_AVX2] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx2,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen4,
+			aesni_gcm_dec_avx_gen4,
+			aesni_gcm_precomp_avx_gen4
+		}
+	}
+};
+
+
+#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
new file mode 100644
index 0000000..8b377cb
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -0,0 +1,505 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <openssl/aes.h>
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/**
+ * Global static parameter used to create a unique name for each AES-NI GCM
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+	int ret;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_GCM_PMD,
+			unique_name_id++);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int
+aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
+		uint8_t *aeskey, unsigned aeskey_length)
+{
+	uint8_t key[aeskey_length] __rte_aligned(16);
+	AES_KEY enc_key;
+
+	/* both lengths must be a multiple of the AES block size (16 bytes) */
+	if (hsubkey_length % 16 != 0 || aeskey_length % 16 != 0)
+		return -EFAULT;
+
+	memcpy(key, aeskey, aeskey_length);
+
+	if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
+		return -EFAULT;
+
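+	/*
+	 * GCM hash subkey H = E_K(0^128): the caller passes a zeroed
+	 * hsubkey buffer, which is encrypted in place below
+	 */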
+	AES_encrypt(hsubkey, hsubkey, &enc_key);
+
+	return 0;
+}
+
+/** Get xform chain order */
+static int
+aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+	/*
+	 * GCM only supports authenticated encryption or authenticated
+	 * decryption, all other options are invalid, so we must have exactly
+	 * 2 xform structs chained together
+	 */
+	if (xform->next == NULL || xform->next->next != NULL)
+		return -1;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+	}
+
+	return -1;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+
+	uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	switch (aesni_gcm_get_mode(xform)) {
+	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	default:
+		GCM_LOG_ERR("Unsupported operation chain order parameter");
+		return -EINVAL;
+	}
+
+	/* We only support AES GCM */
+	if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM ||
+			auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+		return -EINVAL;
+
+	/* Select cipher direction */
+	if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
+			cipher_xform->cipher.op !=
+					RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
+				"(DECRYPT) specified are an invalid selection");
+		return -EINVAL;
+	} else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
+			cipher_xform->cipher.op !=
+					RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
+				"(ENCRYPT) specified are an invalid selection");
+		return -EINVAL;
+	}
+
+	/* Expand GCM AES128 key */
+	(*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
+			sess->gdata.expanded_keys);
+
+	/* Calculate hash sub key here */
+	aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
+			cipher_xform->cipher.key.data,
+			cipher_xform->cipher.key.length);
+
+	/* Calculate GCM pre-compute */
+	(*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+
+	return 0;
+}
+
+/** Get gcm session */
+static struct aesni_gcm_session *
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+{
+	struct aesni_gcm_session *sess = NULL;
+
+	if (op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+		if (unlikely(op->session->type != RTE_CRYPTODEV_AESNI_GCM_PMD))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)op->session->_private;
+	} else {
+		void *_sess;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)
+			((struct rte_cryptodev_session *)_sess)->_private;
+
+		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+				sess, op->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			sess = NULL;
+		}
+	}
+	return sess;
+}
+
+/**
+ * Process a crypto operation, calling the architecture-specific GCM
+ * encrypt or decrypt routine directly on the operation's data
+ *
+ * @param	qp		queue pair
+ * @param	op		symmetric crypto operation
+ * @param	session		GCM session
+ *
+ * @return
+ * - 0 on success, -1 on failure
+ */
+static int
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+		struct aesni_gcm_session *session)
+{
+	uint8_t *src, *dst;
+	struct rte_mbuf *m = op->m_src;
+
+	src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
+	dst = op->m_dst ?
+			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
+					op->cipher.data.offset) :
+			rte_pktmbuf_mtod_offset(m, uint8_t *,
+					op->cipher.data.offset);
+
+	/* sanity checks */
+	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 0) {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
+			op->auth.aad.length != 0) {
+		GCM_LOG_ERR("aad");
+		return -1;
+	}
+
+	if (op->auth.digest.length != 16 &&
+			op->auth.digest.length != 12 &&
+			op->auth.digest.length != 8 &&
+			op->auth.digest.length != 0) {
+		GCM_LOG_ERR("digest");
+		return -1;
+	}
+
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
+
+		(*qp->ops->gcm.enc)(&session->gdata, dst, src,
+				(uint64_t)op->cipher.data.length,
+				op->cipher.iv.data,
+				op->auth.aad.data,
+				(uint64_t)op->auth.aad.length,
+				op->auth.digest.data,
+				(uint64_t)op->auth.digest.length);
+	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+				op->auth.digest.length);
+
+		if (!auth_tag) {
+			GCM_LOG_ERR("Not enough space for digest");
+			return -1;
+		}
+
+		(*qp->ops->gcm.dec)(&session->gdata, dst, src,
+				(uint64_t)op->cipher.data.length,
+				op->cipher.iv.data,
+				op->auth.aad.data,
+				(uint64_t)op->auth.aad.length,
+				auth_tag,
+				(uint64_t)op->auth.digest.length);
+	} else {
+		GCM_LOG_ERR("Wrong operation");
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Post-process a completed crypto operation: on authenticated decryption,
+ * compare the digest calculated over the data against the digest supplied
+ * with the operation, set the operation status accordingly and trim the
+ * area used for the calculated digest from the mbuf
+ *
+ * @param op	completed crypto operation to post-process
+ *
+ * @return	none; the result is reported through op->status
+ */
+static void
+post_process_gcm_crypto_op(struct rte_crypto_op *op)
+{
+	struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+	struct aesni_gcm_session *session =
+		(struct aesni_gcm_session *)op->sym->session->_private;
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Verify digest if required */
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+
+		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
+				m->data_len - op->sym->auth.digest.length);
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		rte_hexdump(stdout, "auth tag (orig):",
+				op->sym->auth.digest.data,
+				op->sym->auth.digest.length);
+		rte_hexdump(stdout, "auth tag (calc):",
+				tag, op->sym->auth.digest.length);
+#endif
+
+		if (memcmp(tag, op->sym->auth.digest.data,
+				op->sym->auth.digest.length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+		/* trim area used for digest from mbuf */
+		rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+	}
+}
+
+/**
+ * Handle a completed GCM request: post-process the operation, release the
+ * session if it was allocated for a session-less operation, and place the
+ * operation on the processed ring
+ *
+ * @param qp	Queue Pair the operation was processed on
+ * @param op	completed crypto operation
+ * @return	none
+ */
+static void
+handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
+		struct rte_crypto_op *op)
+{
+	post_process_gcm_crypto_op(op);
+
+	/* Free session if a session-less crypto op */
+	if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_session *sess;
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	int i, retval = 0;
+
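+	/*
+	 * Ops are processed synchronously at enqueue time and placed on the
+	 * processed ring; dequeue_burst only drains that ring
+	 */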
+	for (i = 0; i < nb_ops; i++) {
+
+		sess = aesni_gcm_get_session(qp, ops[i]->sym);
+		if (unlikely(sess == NULL)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+		if (retval < 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		handle_completed_gcm_crypto_op(qp, ops[i]);
+
+		qp->qp_stats.enqueued_count++;
+	}
+	return i;
+}
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	unsigned nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+			(void **)ops, nb_ops);
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
+
+static int aesni_gcm_uninit(const char *name);
+
+static int
+aesni_gcm_create(const char *name,
+		struct rte_crypto_vdev_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct aesni_gcm_private *internals;
+	enum aesni_gcm_vector_mode vector_mode;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+		GCM_LOG_ERR("AES instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_GCM_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_GCM_AVX;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+		vector_mode = RTE_AESNI_GCM_SSE;
+	else {
+		GCM_LOG_ERR("Vector instructions are not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* create a unique device name */
+	if (create_unique_device_name(crypto_dev_name,
+			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+		GCM_LOG_ERR("failed to create unique cryptodev name");
+		return -EINVAL;
+	}
+
+
+	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+			sizeof(struct aesni_gcm_private), init_params->socket_id);
+	if (dev == NULL) {
+		GCM_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+	dev->dev_ops = rte_aesni_gcm_pmd_ops;
+
+	/* register rx/tx burst functions for data path */
+	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
+
+	/* Set vector instructions mode supported */
+	internals = dev->data->dev_private;
+
+	internals->vector_mode = vector_mode;
+
+	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+
+	return 0;
+
+init_error:
+	GCM_LOG_ERR("driver %s: create failed", name);
+
+	aesni_gcm_uninit(crypto_dev_name);
+	return -EFAULT;
+}
+
+static int
+aesni_gcm_init(const char *name, const char *input_args)
+{
+	struct rte_crypto_vdev_init_params init_params = {
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+		rte_socket_id()
+	};
+
+	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	return aesni_gcm_create(name, &init_params);
+}
+
+static int
+aesni_gcm_uninit(const char *name)
+{
+	if (name == NULL)
+		return -EINVAL;
+
+	GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
+			name, rte_socket_id());
+
+	return 0;
+}
+
+static struct rte_driver aesni_gcm_pmd_drv = {
+	.name = CRYPTODEV_NAME_AESNI_GCM_PMD,
+	.type = PMD_VDEV,
+	.init = aesni_gcm_init,
+	.uninit = aesni_gcm_uninit
+};
+
+PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv);
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
new file mode 100644
index 0000000..f865e0d
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -0,0 +1,292 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/** Configure device */
+static int
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+
+/** Get device info */
+static void
+aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->dev_type = dev->dev_type;
+
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
+
+/** Release queue pair */
+static int
+aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		rte_free(dev->data->queue_pairs[qp_id]);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct aesni_gcm_qp *qp)
+{
+	unsigned n = snprintf(qp->name, sizeof(qp->name),
+			"aesni_gcm_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
+		unsigned ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->prod.size >= ring_size) {
+			GCM_LOG_INFO("Reusing existing ring %s for processed"
+					" packets", qp->name);
+			return r;
+		}
+
+		GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+				" packets", qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id)
+{
+	struct aesni_gcm_qp *qp = NULL;
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		aesni_gcm_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return (-ENOMEM);
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->ops = &gcm_ops[internals->vector_mode];
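+	/* vector_mode (SSE/AVX/AVX2) was detected at device creation time */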
+
+	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	qp->sess_mp = dev->data->session_pool;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp)
+		rte_free(qp);
+
+	return -1;
+}
+
+/** Start queue pair */
+static int
+aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned
+aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct aesni_gcm_session);
+}
+
+/** Configure an aesni gcm session from a crypto xform chain */
+static void *
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,	void *sess)
+{
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	if (unlikely(sess == NULL)) {
+		GCM_LOG_ERR("invalid session struct");
+		return NULL;
+	}
+
+	if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
+			sess, xform) != 0) {
+		GCM_LOG_ERR("failed configure session parameters");
+		return NULL;
+	}
+
+	return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+	if (sess)
+		memset(sess, 0, sizeof(struct aesni_gcm_session));
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+		.dev_configure		= aesni_gcm_pmd_config,
+		.dev_start		= aesni_gcm_pmd_start,
+		.dev_stop		= aesni_gcm_pmd_stop,
+		.dev_close		= aesni_gcm_pmd_close,
+
+		.stats_get		= aesni_gcm_pmd_stats_get,
+		.stats_reset		= aesni_gcm_pmd_stats_reset,
+
+		.dev_infos_get		= aesni_gcm_pmd_info_get,
+
+		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
+		.queue_pair_release	= aesni_gcm_pmd_qp_release,
+		.queue_pair_start	= aesni_gcm_pmd_qp_start,
+		.queue_pair_stop	= aesni_gcm_pmd_qp_stop,
+		.queue_pair_count	= aesni_gcm_pmd_qp_count,
+
+		.session_get_size	= aesni_gcm_pmd_session_get_size,
+		.session_configure	= aesni_gcm_pmd_session_configure,
+		.session_clear		= aesni_gcm_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
new file mode 100644
index 0000000..a42f941
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -0,0 +1,120 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+
+#include "aesni_gcm_ops.h"
+
+#define GCM_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+#define GCM_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#define GCM_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+#else
+#define GCM_LOG_INFO(fmt, args...)
+#define GCM_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each virtual AESNI GCM device */
+struct aesni_gcm_private {
+	enum aesni_gcm_vector_mode vector_mode;
+	/**< Vector mode */
+	unsigned max_nb_queue_pairs;
+	/**< Max number of queue pairs supported by device */
+	unsigned max_nb_sessions;
+	/**< Max number of sessions supported by device */
+};
+
+struct aesni_gcm_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	const struct aesni_gcm_ops *ops;
+	/**< Architecture dependent function pointer table of the gcm APIs */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing process packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+enum aesni_gcm_operation {
+	AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
+	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+	enum aesni_gcm_operation op;
+	/**< GCM operation type */
+	struct gcm_data gdata __rte_cache_aligned;
+	/**< GCM parameters */
+};
+
+
+/**
+ * Setup GCM session parameters
+ * @param	ops	gcm ops function pointer table
+ * @param	sess	aesni gcm session structure
+ * @param	xform	crypto transform chain
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns error code < 0
+ */
+extern int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_sym_xform *xform);
+
+
+/**
+ * Device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+
+
+#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
new file mode 100644
index 0000000..dc4d417
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+	local: *;
+};
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index f279c92..c6c62e7 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -56,6 +56,8 @@ extern "C" {
 /**< Null crypto PMD device name */
 #define CRYPTODEV_NAME_AESNI_MB_PMD	("cryptodev_aesni_mb_pmd")
 /**< AES-NI Multi buffer PMD device name */
+#define CRYPTODEV_NAME_AESNI_GCM_PMD	("cryptodev_aesni_gcm_pmd")
+/**< AES-NI GCM PMD device name */
 #define CRYPTODEV_NAME_QAT_SYM_PMD	("cryptodev_qat_sym_pmd")
 /**< Intel QAT Symmetric Crypto PMD device name */
 #define CRYPTODEV_NAME_SNOW3G_PMD	("cryptodev_snow3g_pmd")
@@ -64,6 +66,7 @@ extern "C" {
 /** Crypto device type */
 enum rte_cryptodev_type {
 	RTE_CRYPTODEV_NULL_PMD = 1,	/**< Null crypto PMD */
+	RTE_CRYPTODEV_AESNI_GCM_PMD,	/**< AES-NI GCM PMD */
 	RTE_CRYPTODEV_AESNI_MB_PMD,	/**< AES-NI multi buffer PMD */
 	RTE_CRYPTODEV_QAT_SYM_PMD,	/**< QAT PMD Symmetric Crypto */
 	RTE_CRYPTODEV_SNOW3G_PMD,	/**< SNOW 3G PMD */
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 7e46370..0725f12 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -102,8 +102,13 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -libverbs
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2)   += -lsze2
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT)    += -lxenstore
 _LDLIBS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD)      += -lgxio
-# QAT PMD has a dependency on libcrypto (from openssl) for calculating HMAC precomputes
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lcrypto
+# QAT and AESNI GCM PMDs are dependent on libcrypto (from openssl):
+# QAT for calculating HMAC precomputes, AESNI GCM for the GHASH subkey
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT),y)
+_LDLIBS-y                                   += -lcrypto
+else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
+_LDLIBS-y                                   += -lcrypto
+endif
 endif # !CONFIG_RTE_BUILD_SHARED_LIBS
 
 _LDLIBS-y += --start-group
@@ -146,9 +151,15 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET)  += -lrte_pmd_af_packet
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL)       += -lrte_pmd_null
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lrte_pmd_qat
 
-# AESNI MULTI BUFFER is dependent on the IPSec_MB library
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -lrte_pmd_aesni_mb
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM)  += -lrte_pmd_aesni_gcm
+
+# AESNI MULTI BUFFER / GCM PMDs are dependent on the IPSec_MB library
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_MB),y)
+_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
+_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+endif
 
 # SNOW3G PMD is dependent on the LIBSSO library
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G)     += -lrte_pmd_snow3g
-- 
2.5.0

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-dev] [PATCH v4] aesni_gcm: PMD to support AES_GCM crypto operations
  2016-03-08 11:26   ` [dpdk-dev] [PATCH v3] " Pablo de Lara
@ 2016-03-10 16:41     ` Pablo de Lara
  2016-03-10 18:53       ` John Griffin
  2016-03-10 23:34       ` Thomas Monjalon
  0 siblings, 2 replies; 9+ messages in thread
From: Pablo de Lara @ 2016-03-10 16:41 UTC (permalink / raw)
  To: dev

From: Declan Doherty <declan.doherty@intel.com>

This patch provides the implementation of an AES-NI accelerated crypto PMD
which is dependent on Intel's multi-buffer library; see the white paper
"Fast Multi-buffer IPsec Implementations on Intel® Architecture Processors".

This PMD supports AES_GCM authenticated encryption and authenticated
decryption using 128-bit AES keys.

The patch also contains the related unit test functions for the implemented functionality.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---

This patch depends on "pmd/snow3g: add new SNOW 3G SW PMD" patch
(http://dpdk.org/dev/patchwork/patch/11424/).

Changes in v4:

- Fixed compilation error when debug flag is enabled
- Rebased to latest DPDK code
- Added missing document

Changes in v3:
- Fixed incorrect error messages

Changes in v2:

- Rebased against crypto API changes
- Removed static config options and allow user to provide them
  as virtual device parameters
- Changed DPDK version references from 2.3 to 16.04 
- Added missing library dependency
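
For reference (not part of the patch): a minimal sketch of the xform chain an
application builds to get authenticated encryption from this PMD. The helper
name create_gcm_enc_session is illustrative; it uses only cryptodev API
elements that appear in this patch and assumes a 128-bit key and the
16.04-era API:

static struct rte_cryptodev_sym_session *
create_gcm_enc_session(uint8_t dev_id, uint8_t *key)
{
	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.algo = RTE_CRYPTO_AUTH_AES_GCM,
			.digest_length = 16,		/* GCM tag length */
			.add_auth_data_length = 12,	/* AAD length */
		},
	};
	/* CIPHER->AUTH chain order selects authenticated encryption;
	 * AUTH->CIPHER would select authenticated decryption */
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xform,
		.cipher = {
			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.key = { .data = key, .length = 16 },
		},
	};

	/* the session is later attached to each op with
	 * rte_crypto_op_attach_sym_session(), as in the tests below */
	return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
}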


 MAINTAINERS                                        |   4 +
 app/test/test_cryptodev.c                          | 466 +++++++++++++++++++
 app/test/test_cryptodev_gcm_test_vectors.h         | 423 +++++++++++++++++
 config/common_base                                 |   6 +
 config/defconfig_i686-native-linuxapp-gcc          |  10 +
 config/defconfig_i686-native-linuxapp-icc          |  10 +
 doc/guides/cryptodevs/aesni_gcm.rst                |  66 +++
 doc/guides/cryptodevs/index.rst                    |   1 +
 doc/guides/rel_notes/release_16_04.rst             |   5 +
 drivers/crypto/Makefile                            |   1 +
 drivers/crypto/aesni_gcm/Makefile                  |  67 +++
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h           | 127 ++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c           | 505 +++++++++++++++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c       | 292 ++++++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h   | 120 +++++
 .../crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map |   3 +
 lib/librte_cryptodev/rte_cryptodev.h               |   3 +
 mk/rte.app.mk                                      |  19 +-
 18 files changed, 2124 insertions(+), 4 deletions(-)
 create mode 100644 app/test/test_cryptodev_gcm_test_vectors.h
 create mode 100644 doc/guides/cryptodevs/aesni_gcm.rst
 create mode 100644 drivers/crypto/aesni_gcm/Makefile
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
 create mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
 create mode 100644 drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 52198b7..d4b2d98 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -349,6 +349,10 @@ Null PMD
 M: Tetsuya Mukawa <mukawa@igel.co.jp>
 F: drivers/net/null/
 
+Intel AES-NI GCM PMD
+M: Declan Doherty <declan.doherty@intel.com>
+F: drivers/crypto/aesni_gcm/
+
 Intel AES-NI Multi-Buffer
 M: Declan Doherty <declan.doherty@intel.com>
 F: drivers/crypto/aesni_mb/
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 595b9f9..dfb7a8c 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -44,6 +44,8 @@
 #include "test_cryptodev.h"
 #include "test_cryptodev_snow3g_test_vectors.h"
 #include "test_cryptodev_snow3g_hash_test_vectors.h"
+#include "test_cryptodev_gcm_test_vectors.h"
+
 static enum rte_cryptodev_type gbl_cryptodev_type;
 
 struct crypto_testsuite_params {
@@ -195,6 +197,21 @@ testsuite_setup(void)
 		}
 	}
 
+	/* Create 2 AESNI GCM devices if required */
+	if (gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_GCM_PMD) {
+		nb_devs = rte_cryptodev_count_devtype(
+				RTE_CRYPTODEV_AESNI_GCM_PMD);
+		if (nb_devs < 2) {
+			for (i = nb_devs; i < 2; i++) {
+				TEST_ASSERT_SUCCESS(rte_eal_vdev_init(
+					CRYPTODEV_NAME_AESNI_GCM_PMD, NULL),
+					"Failed to create instance %u of"
+					" pmd : %s",
+					i, CRYPTODEV_NAME_AESNI_GCM_PMD);
+			}
+		}
+	}
+
 	/* Create 2 Snow3G devices if required */
 	if (gbl_cryptodev_type == RTE_CRYPTODEV_SNOW3G_PMD) {
 		nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_SNOW3G_PMD);
@@ -2760,6 +2777,400 @@ test_snow3g_encrypted_authentication_test_case_1(void)
 /* ***** AES-GCM Tests ***** */
 
 static int
+create_gcm_session(uint8_t dev_id, enum rte_crypto_cipher_operation op,
+		const uint8_t *key, const uint8_t key_len,
+		const uint8_t aad_len, const uint8_t auth_len)
+{
+	uint8_t cipher_key[key_len];
+
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+
+	memcpy(cipher_key, key, key_len);
+
+	/* Setup Cipher Parameters */
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	ut_params->cipher_xform.next = NULL;
+
+	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+	ut_params->cipher_xform.cipher.op = op;
+	ut_params->cipher_xform.cipher.key.data = cipher_key;
+	ut_params->cipher_xform.cipher.key.length = key_len;
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "key:", key, key_len);
+#endif
+	/* Setup Authentication Parameters */
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	ut_params->auth_xform.next = NULL;
+
+	ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
+
+	ut_params->auth_xform.auth.digest_length = auth_len;
+	ut_params->auth_xform.auth.add_auth_data_length = aad_len;
+	ut_params->auth_xform.auth.key.length = 0;
+	ut_params->auth_xform.auth.key.data = NULL;
+
+	if (op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		ut_params->cipher_xform.next = &ut_params->auth_xform;
+
+		/* Create Crypto session*/
+		ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
+				&ut_params->cipher_xform);
+	} else { /* Create Crypto session */
+		ut_params->auth_xform.next = &ut_params->cipher_xform;
+		ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
+				&ut_params->auth_xform);
+	}
+
+	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+
+	return 0;
+}
+
+static int
+create_gcm_operation(enum rte_crypto_cipher_operation op,
+		const uint8_t *auth_tag, const unsigned auth_tag_len,
+		const uint8_t *iv, const unsigned iv_len,
+		const uint8_t *aad, const unsigned aad_len,
+		const unsigned data_len, unsigned data_pad_len)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	unsigned iv_pad_len = 0, aad_buffer_len;
+
+	/* Generate Crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
+
+	struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+
+
+	sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+			ut_params->ibuf, auth_tag_len);
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+			ut_params->ibuf, data_pad_len);
+	sym_op->auth.digest.length = auth_tag_len;
+
+	if (op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		rte_memcpy(sym_op->auth.digest.data, auth_tag, auth_tag_len);
+#ifdef RTE_APP_TEST_DEBUG
+		rte_hexdump(stdout, "digest:",
+				sym_op->auth.digest.data,
+				sym_op->auth.digest.length);
+#endif
+	}
+
+	/* iv */
+	iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+
+	sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+			ut_params->ibuf, iv_pad_len);
+	TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
+
+	memset(sym_op->cipher.iv.data, 0, iv_pad_len);
+	sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+	sym_op->cipher.iv.length = iv_pad_len;
+
+	rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
+
+	/* CalcY0: pad short IVs to a block, Y0 = IV || 0..0 || 1 */
+	if (iv_len != 16)
+		sym_op->cipher.iv.data[15] = 1;
+
+	/*
+	 * Always allocate the aad up to the block size.
+	 * The cryptodev API calls out -
+	 *  - the array must be big enough to hold the AAD, plus any
+	 *   space to round this up to the nearest multiple of the
+	 *   block size (16 bytes).
+	 */
+	aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
+
+	sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
+			ut_params->ibuf, aad_buffer_len);
+	TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
+			"no room to prepend aad");
+	sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
+			ut_params->ibuf);
+	sym_op->auth.aad.length = aad_len;
+
+	memset(sym_op->auth.aad.data, 0, aad_buffer_len);
+	rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "iv:", sym_op->cipher.iv.data, iv_pad_len);
+	rte_hexdump(stdout, "aad:",
+			sym_op->auth.aad.data, aad_len);
+#endif
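+	/* mbuf layout is now: [ aad | iv | plaintext/ciphertext | digest ] */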
+	sym_op->cipher.data.length = data_len;
+	sym_op->cipher.data.offset = aad_buffer_len + iv_pad_len;
+
+	sym_op->auth.data.offset = aad_buffer_len + iv_pad_len;
+	sym_op->auth.data.length = data_len;
+
+	return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	int retval;
+
+	uint8_t *plaintext, *ciphertext, *auth_tag;
+	uint16_t plaintext_pad_len;
+
+	/* Create GCM session */
+	retval = create_gcm_session(ts_params->valid_devs[0],
+			RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			tdata->key.data, tdata->key.len,
+			tdata->aad.len, tdata->auth_tag.len);
+	if (retval < 0)
+		return retval;
+
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+	/* clear mbuf payload */
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	/*
+	 * Append data which is padded to a multiple
+	 * of the algorithms block size
+	 */
+	plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+			plaintext_pad_len);
+	memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
+#endif
+	/* Create GCM operation */
+	retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			tdata->auth_tag.data, tdata->auth_tag.len,
+			tdata->iv.data, tdata->iv.len,
+			tdata->aad.data, tdata->aad.len,
+			tdata->plaintext.len, plaintext_pad_len);
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	/* Process crypto operation */
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	if (ut_params->op->sym->m_dst) {
+		ciphertext = rte_pktmbuf_mtod(ut_params->op->sym->m_dst,
+				uint8_t *);
+		auth_tag = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_dst,
+				uint8_t *, plaintext_pad_len);
+	} else {
+		ciphertext = plaintext;
+		auth_tag = plaintext + plaintext_pad_len;
+	}
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+	rte_hexdump(stdout, "auth tag:", auth_tag, tdata->auth_tag.len);
+#endif
+	/* Validate obuf */
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			ciphertext,
+			tdata->ciphertext.data,
+			tdata->ciphertext.len,
+			"GCM Ciphertext data not as expected");
+
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->auth_tag.data,
+			tdata->auth_tag.len,
+			"GCM Generated auth tag not as expected");
+
+	return 0;
+
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_1(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_1);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_2(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_2);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_3(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_3);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_4(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_4);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_5(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_5);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_6(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_6);
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_test_case_7(void)
+{
+	return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_7);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+
+	int retval;
+
+	uint8_t *plaintext, *ciphertext;
+	uint16_t ciphertext_pad_len;
+
+	/* Create GCM session */
+	retval = create_gcm_session(ts_params->valid_devs[0],
+			RTE_CRYPTO_CIPHER_OP_DECRYPT,
+			tdata->key.data, tdata->key.len,
+			tdata->aad.len, tdata->auth_tag.len);
+	if (retval < 0)
+		return retval;
+
+
+	/* alloc mbuf and set payload */
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	ciphertext_pad_len = RTE_ALIGN_CEIL(tdata->ciphertext.len, 16);
+
+	ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+			ciphertext_pad_len);
+	memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+#endif
+	/* Create GCM operation */
+	retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT,
+			tdata->auth_tag.data, tdata->auth_tag.len,
+			tdata->iv.data, tdata->iv.len,
+			tdata->aad.data, tdata->aad.len,
+			tdata->ciphertext.len, ciphertext_pad_len);
+	if (retval < 0)
+		return retval;
+
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	/* Process crypto operation */
+	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	if (ut_params->op->sym->m_dst)
+		plaintext = rte_pktmbuf_mtod(ut_params->op->sym->m_dst,
+				uint8_t *);
+	else
+		plaintext = ciphertext;
+
+#ifdef RTE_APP_TEST_DEBUG
+	rte_hexdump(stdout, "plaintext:", plaintext, tdata->ciphertext.len);
+#endif
+	/* Validate obuf */
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			plaintext,
+			tdata->plaintext.data,
+			tdata->plaintext.len,
+			"GCM plaintext data not as expected");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status,
+			RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"GCM authentication failed");
+	return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_1(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_1);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_2(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_2);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_3(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_3);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_4(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_4);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_5(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_5);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_6(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_6);
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_test_case_7(void)
+{
+	return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_7);
+}
+
+static int
 test_stats(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -3075,6 +3486,47 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite  = {
 	}
 };
 
+static struct unit_test_suite cryptodev_aesni_gcm_testsuite  = {
+	.suite_name = "Crypto Device AESNI GCM Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		/** AES GCM Authenticated Encryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_7),
+
+		/** AES GCM Authenticated Decryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_7),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
 static struct unit_test_suite cryptodev_sw_snow3g_testsuite  = {
 	.suite_name = "Crypto Device SW Snow3G Unit Test Suite",
 	.setup = testsuite_setup,
@@ -3150,6 +3602,19 @@ static struct test_command cryptodev_aesni_mb_cmd = {
 };
 
 static int
+test_cryptodev_aesni_gcm(void)
+{
+	gbl_cryptodev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+
+	return unit_test_suite_runner(&cryptodev_aesni_gcm_testsuite);
+}
+
+static struct test_command cryptodev_aesni_gcm_cmd = {
+	.command = "cryptodev_aesni_gcm_autotest",
+	.callback = test_cryptodev_aesni_gcm,
+};
+
+static int
 test_cryptodev_sw_snow3g(void /*argv __rte_unused, int argc __rte_unused*/)
 {
 	gbl_cryptodev_type = RTE_CRYPTODEV_SNOW3G_PMD;
@@ -3164,4 +3629,5 @@ static struct test_command cryptodev_sw_snow3g_cmd = {
 
 REGISTER_TEST_COMMAND(cryptodev_qat_cmd);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_cmd);
+REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_cmd);
 REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_cmd);
diff --git a/app/test/test_cryptodev_gcm_test_vectors.h b/app/test/test_cryptodev_gcm_test_vectors.h
new file mode 100644
index 0000000..8ae22ba
--- /dev/null
+++ b/app/test/test_cryptodev_gcm_test_vectors.h
@@ -0,0 +1,423 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	 * Redistributions of source code must retain the above copyright
+ *	   notice, this list of conditions and the following disclaimer.
+ *	 * Redistributions in binary form must reproduce the above copyright
+ *	   notice, this list of conditions and the following disclaimer in
+ *	   the documentation and/or other materials provided with the
+ *	   distribution.
+ *	 * Neither the name of Intel Corporation nor the names of its
+ *	   contributors may be used to endorse or promote products derived
+ *	   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+
+struct gcm_test_data {
+	struct {
+		uint8_t data[64];
+		unsigned len;
+	} key;
+
+	struct {
+		uint8_t data[64] __rte_aligned(16);
+		unsigned len;
+	} iv;
+
+	struct {
+		uint8_t data[64];
+		unsigned len;
+	} aad;
+
+	struct {
+		uint8_t data[1024];
+		unsigned len;
+	} plaintext;
+
+	struct {
+		uint8_t data[1024];
+		unsigned len;
+	} ciphertext;
+
+	struct {
+		uint8_t data[16];
+		unsigned len;
+	} auth_tag;
+};
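
Each vector below is a complete known-answer test: key, IV, optional AAD, a
plaintext/ciphertext pair and the expected authentication tag. As a minimal
sketch, a further case would be declared the same way (the values here are
hypothetical placeholders, not a real known-answer vector):

	static const struct gcm_test_data gcm_test_case_example = {
		.key = { .data = { 0x00 /* ...16 key bytes... */ }, .len = 16 },
		.iv = { .data = { 0x00 /* ...12 IV bytes... */ }, .len = 12 },
		.aad = { .data = { 0 }, .len = 0 },
		.plaintext = { .data = { 0x00 /* ...payload... */ }, .len = 16 },
		.ciphertext = { .data = { 0x00 /* ...expected output... */ }, .len = 16 },
		.auth_tag = { .data = { 0x00 /* ...expected tag... */ }, .len = 16 },
	};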
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_1 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0x00 },
+		.len = 0
+	},
+	.ciphertext = {
+		.data = {
+			0x00
+		},
+		.len = 0
+	},
+	.auth_tag = {
+		.data = {
+			0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61,
+			0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_2 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 16
+	},
+	.ciphertext = {
+		.data = {
+			0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92,
+			0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78 },
+		.len = 16
+	},
+	.auth_tag = {
+		.data = {
+			0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd,
+			0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_3 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = { 0 },
+		.len = 0
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
+		.len = 64
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85
+		},
+		.len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
+			0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_4 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+		.len = 8
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xA2, 0xA4, 0x35, 0x75, 0xDC, 0xB0, 0x57, 0x74,
+			0x07, 0x02, 0x30, 0xC2, 0xE7, 0x52, 0x02, 0x00
+		},
+		.len = 16
+	}
+
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_5 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88 },
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef },
+		.len = 8
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xC5, 0x2D, 0xFB, 0x54, 0xAF, 0xBB, 0x07, 0xA1,
+			0x9A, 0xFF, 0xBE, 0xE0, 0x61, 0x4C, 0xE7, 0xA5
+		},
+		.len = 16
+	}
+
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_6 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0x74, 0xFC, 0xFA, 0x29, 0x3E, 0x60, 0xCC, 0x66,
+			0x09, 0xD6, 0xFD, 0x00, 0xC8, 0x86, 0xD5, 0x42
+		},
+		.len = 16
+	}
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_7 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.aad = {
+		.data = {
+			0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+			0xfe, 0xed, 0xfa, 0xce
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = {
+			0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+			0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+			0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+			0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+			0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+			0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+			0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+			0xba, 0x63, 0x7b, 0x39
+		},
+		.len = 60
+	},
+	.ciphertext = {
+		.data = {
+			0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+			0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+			0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+			0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+			0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+			0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+			0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+			0x3d, 0x58, 0xe0, 0x91
+		},
+		.len = 60
+	},
+	.auth_tag = {
+		.data = {
+			0xE9, 0xE4, 0xAB, 0x76, 0xB7, 0xFF, 0xEA, 0xDC,
+			0x69, 0x79, 0x38, 0xA2, 0x0D, 0xCA, 0xF5, 0x92
+		},
+		.len = 16
+	}
+};
+
+
+#endif /* TEST_CRYPTODEV_GCM_TEST_VECTORS_H_ */
diff --git a/config/common_base b/config/common_base
index 99f2d31..287fa34 100644
--- a/config/common_base
+++ b/config/common_base
@@ -338,6 +338,12 @@ CONFIG_RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS=8
 CONFIG_RTE_AESNI_MB_PMD_MAX_NB_SESSIONS=2048
 
 #
+# Compile PMD for AESNI GCM device
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM_DEBUG=n
+
+#
 # Compile PMD for SNOW 3G device
 #
 CONFIG_RTE_LIBRTE_PMD_SNOW3G=n
diff --git a/config/defconfig_i686-native-linuxapp-gcc b/config/defconfig_i686-native-linuxapp-gcc
index 290183a..c32859f 100644
--- a/config/defconfig_i686-native-linuxapp-gcc
+++ b/config/defconfig_i686-native-linuxapp-gcc
@@ -50,3 +50,13 @@ CONFIG_RTE_LIBRTE_KNI=n
 # Vectorized PMD is not supported on 32-bit
 #
 CONFIG_RTE_IXGBE_INC_VECTOR=n
+
+#
+# AES-NI multi-buffer PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
+
+#
+# AES-NI GCM PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
diff --git a/config/defconfig_i686-native-linuxapp-icc b/config/defconfig_i686-native-linuxapp-icc
index 96725f3..cde9d96 100644
--- a/config/defconfig_i686-native-linuxapp-icc
+++ b/config/defconfig_i686-native-linuxapp-icc
@@ -50,3 +50,13 @@ CONFIG_RTE_LIBRTE_KNI=n
 # Vectorized PMD is not supported on 32-bit
 #
 CONFIG_RTE_IXGBE_INC_VECTOR=n
+
+#
+# AES-NI multi-buffer PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
+
+#
+# AES-NI GCM PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
new file mode 100644
index 0000000..908b3fd
--- /dev/null
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -0,0 +1,66 @@
+..  BSD LICENSE
+    Copyright(c) 2016 Intel Corporation. All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions
+    are met:
+
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+    * Neither the name of Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived
+    from this software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+AES-NI GCM Crypto Poll Mode Driver
+==================================
+
+
+The AES-NI GCM PMD (**librte_pmd_aesni_gcm**) provides poll mode crypto driver
+support utilizing Intel's multi-buffer library (see the AES-NI Multi-buffer PMD
+documentation to learn more about it, including installation instructions).
+
+The AES-NI GCM PMD has currently only been tested on Fedora 21 64-bit with gcc.
+
+Features
+--------
+
+The AESNI GCM PMD has support for:
+
+Cipher algorithms:
+
+* RTE_CRYPTO_CIPHER_AES_GCM
+
+Authentication algorithms:
+
+* RTE_CRYPTO_AUTH_AES_GCM
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+* Hash only is not supported.
+* Cipher only is not supported.
+* Only in-place is currently supported (destination address is the same as source address).
+* Only supports session-oriented API implementation (session-less APIs are not supported).
+* Not performance tuned.
+
+Installation
+------------
+
+To build DPDK with this PMD, the environment variable
+AESNI_MULTI_BUFFER_LIB_PATH must be exported with the path where you extracted
+and built the multi-buffer library. Finally, set
+CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=y in config/common_base.
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 071e7d2..3c5e04f 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -36,5 +36,6 @@ Crypto Device Drivers
     :numbered:
 
     aesni_mb
+    aesni_gcm
     snow3g
     qat
diff --git a/doc/guides/rel_notes/release_16_04.rst b/doc/guides/rel_notes/release_16_04.rst
index 4fb0738..b4ed25d 100644
--- a/doc/guides/rel_notes/release_16_04.rst
+++ b/doc/guides/rel_notes/release_16_04.rst
@@ -85,6 +85,11 @@ This section should contain new features added in this release. Sample format:
   A new Crypto PMD has been added, which provides SNOW 3G UEA2 ciphering
   and SNOW3G UIA2 hashing.
 
+* **Added AES GCM PMD**
+
+  Added a new Crypto PMD to support AES-GCM authenticated encryption and
+  authenticated decryption in software.
+
 Resolved Issues
 ---------------
 
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index bf586d9..021ac0d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -31,6 +31,7 @@
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
new file mode 100644
index 0000000..aa2621b
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -0,0 +1,67 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
+
+# library name
+LIB = librte_pmd_aesni_gcm.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_gcm_version.map
+
+# external library include paths
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
new file mode 100644
index 0000000..c399068
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -0,0 +1,127 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AESNI_GCM_OPS_H_
+#define _AESNI_GCM_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <gcm_defines.h>
+#include <aux_funcs.h>
+
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+	RTE_AESNI_GCM_SSE,
+	RTE_AESNI_GCM_AVX,
+	RTE_AESNI_GCM_AVX2
+};
+
+typedef void (*aes_keyexp_128_enc_t)(void *key, void *enc_exp_keys);
+
+typedef void (*aesni_gcm_t)(gcm_data *my_ctx_data, u8 *out, const u8 *in,
+		u64 plaintext_len, u8 *iv, const u8 *aad, u64 aad_len,
+		u8 *auth_tag, u64 auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(gcm_data *my_ctx_data, u8 *hash_subkey);
+
+/** GCM library function pointer table */
+struct aesni_gcm_ops {
+	struct {
+		struct {
+			aes_keyexp_128_enc_t aes128_enc;
+			/**< AES128 enc key expansion */
+		} keyexp;
+		/**< Key expansion functions */
+	} aux; /**< Auxiliary functions */
+
+	struct {
+		aesni_gcm_t enc;	/**< GCM encode function pointer */
+		aesni_gcm_t dec;	/**< GCM decode function pointer */
+		aesni_gcm_precomp_t precomp;	/**< GCM pre-compute */
+	} gcm; /**< GCM functions */
+};
+
+
+static const struct aesni_gcm_ops gcm_ops[] = {
+	[RTE_AESNI_GCM_NOT_SUPPORTED] = {
+		.aux = {
+			.keyexp = {
+				NULL
+			}
+		},
+		.gcm = {
+			NULL
+		}
+	},
+	[RTE_AESNI_GCM_SSE] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_sse
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_sse,
+			aesni_gcm_dec_sse,
+			aesni_gcm_precomp_sse
+		}
+	},
+	[RTE_AESNI_GCM_AVX] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen2,
+			aesni_gcm_dec_avx_gen2,
+			aesni_gcm_precomp_avx_gen2
+		}
+	},
+	[RTE_AESNI_GCM_AVX2] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx2,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen4,
+			aesni_gcm_dec_avx_gen4,
+			aesni_gcm_precomp_avx_gen4
+		}
+	}
+};
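
This table maps the vector mode detected at device creation to the matching
ISA-specific entry points, so the data path dispatches through a single
indexed load. A minimal sketch of the dispatch, assuming `mode`, `sess` and
`hsubkey` are already set up by the caller:

	const struct aesni_gcm_ops *ops = &gcm_ops[mode];

	/* e.g. run the GCM pre-compute step for the selected ISA variant */
	(*ops->gcm.precomp)(&sess->gdata, hsubkey);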
+
+
+#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
new file mode 100644
index 0000000..83aa272
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -0,0 +1,505 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <openssl/aes.h>
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/**
+ * Global static parameter used to create a unique name for each AES-NI GCM
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+	int ret;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_GCM_PMD,
+			unique_name_id++);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int
+aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
+		uint8_t *aeskey, unsigned aeskey_length)
+{
+	uint8_t key[aeskey_length] __rte_aligned(16);
+	AES_KEY enc_key;
+
+	if (hsubkey_length % 16 != 0 || aeskey_length % 16 != 0)
+		return -EFAULT;
+
+	memcpy(key, aeskey, aeskey_length);
+
+	if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
+		return -EFAULT;
+
+	AES_encrypt(hsubkey, hsubkey, &enc_key);
+
+	return 0;
+}
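
The hash subkey used by GHASH is the encryption of the all-zero block under
the cipher key (H = AES-128_K(0^128), per NIST SP 800-38D); the helper
encrypts `hsubkey` in place. A minimal usage sketch, assuming `key` points
to a 16-byte AES-128 key:

	uint8_t hsubkey[16] __rte_aligned(16) = { 0 };

	/* hsubkey starts as the zero block and is encrypted in place */
	if (aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
			key, 16) != 0)
		return -EFAULT;	/* bad key/subkey length or key expansion failed */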
+
+/** Get xform chain order */
+static int
+aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+	/*
+	 * GCM only supports authenticated encryption or authenticated
+	 * decryption, all other options are invalid, so we must have exactly
+	 * 2 xform structs chained together
+	 */
+	if (xform->next == NULL || xform->next->next != NULL)
+		return -1;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+	}
+
+	return -1;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+
+	uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	switch (aesni_gcm_get_mode(xform)) {
+	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	default:
+		GCM_LOG_ERR("Unsupported operation chain order parameter");
+		return -EINVAL;
+	}
+
+	/* We only support AES GCM */
+	if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM ||
+			auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+		return -EINVAL;
+
+	/* Select cipher direction */
+	if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
+			cipher_xform->cipher.op !=
+					RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
+				"(DECRYPT) specified are an invalid selection");
+		return -EINVAL;
+	} else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
+			cipher_xform->cipher.op !=
+					RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
+				"(ENCRYPT) specified are an invalid selection");
+		return -EINVAL;
+	}
+
+	/* Expand GCM AES128 key */
+	(*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
+			sess->gdata.expanded_keys);
+
+	/* Calculate hash sub key here */
+	aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
+			cipher_xform->cipher.key.data,
+			cipher_xform->cipher.key.length);
+
+	/* Calculate GCM pre-compute */
+	(*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+
+	return 0;
+}
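
For reference, the xform chain this function expects for authenticated
encryption is cipher-then-auth (auth-then-cipher for decryption). A minimal
sketch of building such a chain, assuming `key_data` points to a 16-byte
AES-128 key; the field values are illustrative:

	struct rte_crypto_sym_xform auth_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_AES_GCM,
			.key = { .data = key_data, .length = 16 },
			.digest_length = 16,
		},
	};
	struct rte_crypto_sym_xform cipher_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xf,	/* cipher then auth -> encryption */
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
			.key = { .data = key_data, .length = 16 },
		},
	};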
+
+/** Get gcm session */
+static struct aesni_gcm_session *
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+{
+	struct aesni_gcm_session *sess = NULL;
+
+	if (op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+		if (unlikely(op->session->type != RTE_CRYPTODEV_AESNI_GCM_PMD))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)op->session->_private;
+	} else  {
+		void *_sess;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)
+			((struct rte_cryptodev_session *)_sess)->_private;
+
+		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+				sess, op->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			sess = NULL;
+		}
+	}
+	return sess;
+}
+
+/**
+ * Process a crypto operation, calling the GCM library encrypt or decrypt
+ * function appropriate to the session's operation type.
+ *
+ * @param	qp		queue pair
+ * @param	op		symmetric crypto operation
+ * @param	session		GCM session
+ *
+ * @return
+ * - 0 on success
+ * - -1 on failure
+ */
+static int
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+		struct aesni_gcm_session *session)
+{
+	uint8_t *src, *dst;
+	struct rte_mbuf *m = op->m_src;
+
+	src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
+	dst = op->m_dst ?
+			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
+					op->cipher.data.offset) :
+			rte_pktmbuf_mtod_offset(m, uint8_t *,
+					op->cipher.data.offset);
+
+	/* sanity checks */
+	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 0) {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
+			op->auth.aad.length != 0) {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	if (op->auth.digest.length != 16 &&
+			op->auth.digest.length != 12 &&
+			op->auth.digest.length != 8 &&
+			op->auth.digest.length != 0) {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
+
+		(*qp->ops->gcm.enc)(&session->gdata, dst, src,
+				(uint64_t)op->cipher.data.length,
+				op->cipher.iv.data,
+				op->auth.aad.data,
+				(uint64_t)op->auth.aad.length,
+				op->auth.digest.data,
+				(uint64_t)op->auth.digest.length);
+	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+				op->auth.digest.length);
+
+		if (!auth_tag) {
+			GCM_LOG_ERR("iv");
+			return -1;
+		}
+
+		(*qp->ops->gcm.dec)(&session->gdata, dst, src,
+				(uint64_t)op->cipher.data.length,
+				op->cipher.iv.data,
+				op->auth.aad.data,
+				(uint64_t)op->auth.aad.length,
+				auth_tag,
+				(uint64_t)op->auth.digest.length);
+	} else {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Post process a completed crypto operation: set the operation status and,
+ * for authenticated decryption, verify the computed digest against the
+ * digest supplied with the operation before trimming the digest area from
+ * the mbuf.
+ *
+ * @param op	crypto operation to post process
+ */
+static void
+post_process_gcm_crypto_op(struct rte_crypto_op *op)
+{
+	struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+	struct aesni_gcm_session *session =
+		(struct aesni_gcm_session *)op->sym->session->_private;
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Verify digest if required */
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+
+		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
+				m->data_len - op->sym->auth.digest.length);
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		rte_hexdump(stdout, "auth tag (orig):",
+				op->sym->auth.digest.data, op->sym->auth.digest.length);
+		rte_hexdump(stdout, "auth tag (calc):",
+				tag, op->sym->auth.digest.length);
+#endif
+
+		if (memcmp(tag, op->sym->auth.digest.data,
+				op->sym->auth.digest.length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+		/* trim area used for digest from mbuf */
+		rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+	}
+}
+
+/**
+ * Handle a completed GCM request: run post processing, free the session if
+ * the operation was session-less and enqueue the operation on the processed
+ * packets ring.
+ *
+ * @param qp	Queue Pair that processed the operation
+ * @param op	completed crypto operation
+ */
+static void
+handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
+		struct rte_crypto_op *op)
+{
+	post_process_gcm_crypto_op(op);
+
+	/* Free session if a session-less crypto op */
+	if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_session *sess;
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	int i, retval = 0;
+
+	for (i = 0; i < nb_ops; i++) {
+
+		sess = aesni_gcm_get_session(qp, ops[i]->sym);
+		if (unlikely(sess == NULL)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+		if (retval < 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		handle_completed_gcm_crypto_op(qp, ops[i]);
+
+		qp->qp_stats.enqueued_count++;
+	}
+	return i;
+}
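
Note that, unlike a hardware PMD, the operation is processed synchronously at
enqueue time and completed ops land on the processed-packets ring, so an
application simply pairs the two burst calls. A minimal polling sketch, with
`dev_id`, `qp_id`, `ops` and `nb` assumed set up by the caller:

	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb);
	uint16_t done = 0;

	/* for this PMD every successfully enqueued op is immediately dequeueable */
	while (done < sent)
		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				&ops[done], sent - done);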
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	unsigned nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+			(void **)ops, nb_ops);
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
+
+static int aesni_gcm_uninit(const char *name);
+
+static int
+aesni_gcm_create(const char *name,
+		struct rte_crypto_vdev_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct aesni_gcm_private *internals;
+	enum aesni_gcm_vector_mode vector_mode;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+		GCM_LOG_ERR("AES instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_GCM_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_GCM_AVX;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+		vector_mode = RTE_AESNI_GCM_SSE;
+	else {
+		GCM_LOG_ERR("Vector instructions are not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* create a unique device name */
+	if (create_unique_device_name(crypto_dev_name,
+			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+		GCM_LOG_ERR("failed to create unique cryptodev name");
+		return -EINVAL;
+	}
+
+
+	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+			sizeof(struct aesni_gcm_private), init_params->socket_id);
+	if (dev == NULL) {
+		GCM_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+	dev->dev_ops = rte_aesni_gcm_pmd_ops;
+
+	/* register rx/tx burst functions for data path */
+	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
+
+	/* Set vector instructions mode supported */
+	internals = dev->data->dev_private;
+
+	internals->vector_mode = vector_mode;
+
+	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+
+	return 0;
+
+init_error:
+	GCM_LOG_ERR("driver %s: create failed", name);
+
+	aesni_gcm_uninit(crypto_dev_name);
+	return -EFAULT;
+}
+
+static int
+aesni_gcm_init(const char *name, const char *input_args)
+{
+	struct rte_crypto_vdev_init_params init_params = {
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+		rte_socket_id()
+	};
+
+	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	return aesni_gcm_create(name, &init_params);
+}
+
+static int
+aesni_gcm_uninit(const char *name)
+{
+	if (name == NULL)
+		return -EINVAL;
+
+	GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
+			name, rte_socket_id());
+
+	return 0;
+}
+
+static struct rte_driver aesni_gcm_pmd_drv = {
+	.name = CRYPTODEV_NAME_AESNI_GCM_PMD,
+	.type = PMD_VDEV,
+	.init = aesni_gcm_init,
+	.uninit = aesni_gcm_uninit
+};
+
+PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv);
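
Besides the EAL --vdev command line option, the registered driver can be
instantiated programmatically. A minimal sketch; the argument string is a
hypothetical example of the comma-separated key=value pairs accepted by
rte_cryptodev_parse_vdev_init_params, and omitted keys fall back to the
defaults shown above:

	rte_eal_vdev_init(CRYPTODEV_NAME_AESNI_GCM_PMD,
			"max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0");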
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
new file mode 100644
index 0000000..f865e0d
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -0,0 +1,292 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/** Configure device */
+static int
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+
+/** Get device info */
+static void
+aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->dev_type = dev->dev_type;
+
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
+
+/** Release queue pair */
+static int
+aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		rte_free(dev->data->queue_pairs[qp_id]);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct aesni_gcm_qp *qp)
+{
+	unsigned n = snprintf(qp->name, sizeof(qp->name),
+			"aesni_gcm_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
+		unsigned ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->prod.size >= ring_size) {
+			GCM_LOG_INFO("Reusing existing ring %s for processed"
+					" packets", qp->name);
+			return r;
+		}
+
+		GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+				" packets", qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id)
+{
+	struct aesni_gcm_qp *qp = NULL;
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		aesni_gcm_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->ops = &gcm_ops[internals->vector_mode];
+
+	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	qp->sess_mp = dev->data->session_pool;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp)
+		rte_free(qp);
+
+	return -1;
+}
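
From the application side the queue pair is configured through the generic
cryptodev API before the device is started. A minimal sketch; the descriptor
count of 2048 is an arbitrary example and sizes the processed-packets ring
created above:

	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };

	if (rte_cryptodev_queue_pair_setup(dev_id, 0 /* qp_id */,
			&qp_conf, rte_socket_id()) < 0)
		rte_exit(EXIT_FAILURE, "queue pair setup failed");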
+
+/** Start queue pair */
+static int
+aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned
+aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct aesni_gcm_session);
+}
+
+/** Configure an aesni gcm session from a crypto xform chain */
+static void *
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,	void *sess)
+{
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	if (unlikely(sess == NULL)) {
+		GCM_LOG_ERR("invalid session struct");
+		return NULL;
+	}
+
+	if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
+			sess, xform) != 0) {
+		GCM_LOG_ERR("failed configure session parameters");
+		return NULL;
+	}
+
+	return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+	if (sess)
+		memset(sess, 0, sizeof(struct aesni_gcm_session));
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+		.dev_configure		= aesni_gcm_pmd_config,
+		.dev_start		= aesni_gcm_pmd_start,
+		.dev_stop		= aesni_gcm_pmd_stop,
+		.dev_close		= aesni_gcm_pmd_close,
+
+		.stats_get		= aesni_gcm_pmd_stats_get,
+		.stats_reset		= aesni_gcm_pmd_stats_reset,
+
+		.dev_infos_get		= aesni_gcm_pmd_info_get,
+
+		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
+		.queue_pair_release	= aesni_gcm_pmd_qp_release,
+		.queue_pair_start	= aesni_gcm_pmd_qp_start,
+		.queue_pair_stop	= aesni_gcm_pmd_qp_stop,
+		.queue_pair_count	= aesni_gcm_pmd_qp_count,
+
+		.session_get_size	= aesni_gcm_pmd_session_get_size,
+		.session_configure	= aesni_gcm_pmd_session_configure,
+		.session_clear		= aesni_gcm_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
new file mode 100644
index 0000000..a42f941
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -0,0 +1,120 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+
+#include "aesni_gcm_ops.h"
+
+#define GCM_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+#define GCM_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#define GCM_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+#else
+#define GCM_LOG_INFO(fmt, args...)
+#define GCM_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each virtual AESNI GCM device */
+struct aesni_gcm_private {
+	enum aesni_gcm_vector_mode vector_mode;
+	/**< Vector mode */
+	unsigned max_nb_queue_pairs;
+	/**< Max number of queue pairs supported by device */
+	unsigned max_nb_sessions;
+	/**< Max number of sessions supported by device */
+};
+
+struct aesni_gcm_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	const struct aesni_gcm_ops *ops;
+	/**< Architecture dependent function pointer table of the gcm APIs */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing process packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+enum aesni_gcm_operation {
+	AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
+	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+	enum aesni_gcm_operation op;
+	/**< GCM operation type */
+	struct gcm_data gdata __rte_cache_aligned;
+	/**< GCM parameters */
+};
+
+
+/**
+ * Setup GCM session parameters
+ * @param	ops	gcm ops function pointer table
+ * @param	sess	aesni gcm session structure
+ * @param	xform	crypto transform chain
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns error code < 0
+ */
+extern int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_sym_xform *xform);
+
+
+/**
+ * Device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+
+
+#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
new file mode 100644
index 0000000..dc4d417
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+	local: *;
+};
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 53cca22..eb85c46 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -56,6 +56,8 @@ extern "C" {
 /**< Null crypto PMD device name */
 #define CRYPTODEV_NAME_AESNI_MB_PMD	("cryptodev_aesni_mb_pmd")
 /**< AES-NI Multi buffer PMD device name */
+#define CRYPTODEV_NAME_AESNI_GCM_PMD	("cryptodev_aesni_gcm_pmd")
+/**< AES-NI GCM PMD device name */
 #define CRYPTODEV_NAME_QAT_SYM_PMD	("cryptodev_qat_sym_pmd")
 /**< Intel QAT Symmetric Crypto PMD device name */
 #define CRYPTODEV_NAME_SNOW3G_PMD	("cryptodev_snow3g_pmd")
@@ -64,6 +66,7 @@ extern "C" {
 /** Crypto device type */
 enum rte_cryptodev_type {
 	RTE_CRYPTODEV_NULL_PMD = 1,	/**< Null crypto PMD */
+	RTE_CRYPTODEV_AESNI_GCM_PMD,	/**< AES-NI GCM PMD */
 	RTE_CRYPTODEV_AESNI_MB_PMD,	/**< AES-NI multi buffer PMD */
 	RTE_CRYPTODEV_QAT_SYM_PMD,	/**< QAT PMD Symmetric Crypto */
 	RTE_CRYPTODEV_SNOW3G_PMD,	/**< SNOW 3G PMD */
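
With the name macro and device type above in place, an application can
instantiate the PMD as a virtual device at runtime. A minimal sketch,
assuming the vdev API of this release (create_gcm_vdev() is a
hypothetical helper):

#include <rte_dev.h>
#include <rte_cryptodev.h>

/* Hypothetical helper: create one AESNI GCM vdev and verify that it
 * registered under the new RTE_CRYPTODEV_AESNI_GCM_PMD device type. */
static int
create_gcm_vdev(void)
{
	if (rte_eal_vdev_init(CRYPTODEV_NAME_AESNI_GCM_PMD, NULL) < 0)
		return -1;

	return rte_cryptodev_count_devtype(
			RTE_CRYPTODEV_AESNI_GCM_PMD) > 0 ? 0 : -1;
}
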
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 7e46370..0725f12 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -102,8 +102,13 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -libverbs
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2)   += -lsze2
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT)    += -lxenstore
 _LDLIBS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD)      += -lgxio
-# QAT PMD has a dependency on libcrypto (from openssl) for calculating HMAC precomputes
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lcrypto
+# QAT / AESNI GCM PMDs are dependent on libcrypto (from openssl)
+# for calculating HMAC precomputes
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT),y)
+_LDLIBS-y                                   += -lcrypto
+else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
+_LDLIBS-y                                   += -lcrypto
+endif
 endif # !CONFIG_RTE_BUILD_SHARED_LIBS
 
 _LDLIBS-y += --start-group
@@ -146,9 +151,15 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET)  += -lrte_pmd_af_packet
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL)       += -lrte_pmd_null
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lrte_pmd_qat
 
-# AESNI MULTI BUFFER is dependent on the IPSec_MB library
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -lrte_pmd_aesni_mb
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM)  += -lrte_pmd_aesni_gcm
+
+# AESNI MULTI BUFFER / GCM PMDs are dependent on the IPSec_MB library
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_MB),y)
+_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
+_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+endif
 
 # SNOW3G PMD is dependent on the LIBSSO library
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G)     += -lrte_pmd_snow3g
-- 
2.5.0

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH v4] aesni_gcm: PMD to support AES_GCM crypto operations
  2016-03-10 16:41     ` [dpdk-dev] [PATCH v4] " Pablo de Lara
@ 2016-03-10 18:53       ` John Griffin
  2016-03-10 23:45         ` Thomas Monjalon
  2016-03-10 23:34       ` Thomas Monjalon
  1 sibling, 1 reply; 9+ messages in thread
From: John Griffin @ 2016-03-10 18:53 UTC (permalink / raw)
  To: Pablo de Lara, dev

On 10/03/16 16:41, Pablo de Lara wrote:
> From: Declan Doherty <declan.doherty@intel.com>
>
> This patch provides the implementation of an AES-NI accelerated crypto PMD
> which is dependent on Intel's multi-buffer library; see the white paper
> "Fast Multi-buffer IPsec Implementations on Intel® Architecture Processors".
>
> This PMD supports AES_GCM authenticated encryption and authenticated
> decryption using 128-bit AES keys.
>
> The patch also contains the related unit test functions for the implemented
> functionality.
>
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> --

Acked-by: John Griffin <john.griffin@intel.com>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH v4] aesni_gcm: PMD to support AES_GCM crypto operations
  2016-03-10 16:41     ` [dpdk-dev] [PATCH v4] " Pablo de Lara
  2016-03-10 18:53       ` John Griffin
@ 2016-03-10 23:34       ` Thomas Monjalon
  1 sibling, 0 replies; 9+ messages in thread
From: Thomas Monjalon @ 2016-03-10 23:34 UTC (permalink / raw)
  To: Pablo de Lara; +Cc: dev

2016-03-10 16:41, Pablo de Lara:
> +Intel AES-NI GCM PMD
> +M: Declan Doherty <declan.doherty@intel.com>
> +F: drivers/crypto/aesni_gcm/

Missing doc reference:
F: doc/guides/cryptodevs/aesni_gcm.rst

> +# AESNI MULTI BUFFER / GCM PMDs are dependent on the IPSec_MB library
> +ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_MB),y)
> +_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
> +else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
> +_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
> +endif

This dependency should be set when building the PMD as a shared library also.
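
For illustration, one way to express that, as a hypothetical sketch only
(the eventual fix may differ), would be to carry the dependency in the
PMD's own Makefile:

# hypothetical addition to drivers/crypto/aesni_gcm/Makefile
LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB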

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH v4] aesni_gcm: PMD to support AES_GCM crypto operations
  2016-03-10 18:53       ` John Griffin
@ 2016-03-10 23:45         ` Thomas Monjalon
  0 siblings, 0 replies; 9+ messages in thread
From: Thomas Monjalon @ 2016-03-10 23:45 UTC (permalink / raw)
  To: Pablo de Lara; +Cc: dev

2016-03-10 18:53, John Griffin:
> On 10/03/16 16:41, Pablo de Lara wrote:
> > From: Declan Doherty <declan.doherty@intel.com>
> >
> > This patch provides the implementation of an AES-NI accelerated crypto PMD
> > which is dependent on Intel's multi-buffer library; see the white paper
> > "Fast Multi-buffer IPsec Implementations on Intel® Architecture Processors".
> >
> > This PMD supports AES_GCM authenticated encryption and authenticated
> > decryption using 128-bit AES keys.
> >
> > The patch also contains the related unit test functions for the implemented
> > functionality.
> >
> > Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> > Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> 
> Acked-by: John Griffin <john.griffin@intel.com>

Applied with trivial fixes, thanks.

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2016-03-10 23:47 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-01-30 13:09 [dpdk-dev] [PATCH] aesni_gcm: PMD to support AES_GCM crypto operations Declan Doherty
2016-01-30 16:40 ` O'Driscoll, Tim
2016-03-08 10:09 ` [dpdk-dev] [PATCH v2] " Pablo de Lara
2016-03-08 11:22   ` De Lara Guarch, Pablo
2016-03-08 11:26   ` [dpdk-dev] [PATCH v3] " Pablo de Lara
2016-03-10 16:41     ` [dpdk-dev] [PATCH v4] " Pablo de Lara
2016-03-10 18:53       ` John Griffin
2016-03-10 23:45         ` Thomas Monjalon
2016-03-10 23:34       ` Thomas Monjalon
