DPDK patches and discussions
* [PATCH 1/4] common/qat: add files specific to GEN5
@ 2023-12-20 13:26 Nishikant Nayak
  2023-12-20 13:26 ` [PATCH 2/4] common/qat: update common driver to support GEN5 Nishikant Nayak
                   ` (10 more replies)
  0 siblings, 11 replies; 47+ messages in thread
From: Nishikant Nayak @ 2023-12-20 13:26 UTC (permalink / raw)
  To: dev
  Cc: kai.ji, ciara.power, arkadiuszx.kusztal, Nishikant Nayak,
	Thomas Monjalon, Anatoly Burakov

Adding GEN5 files for handling GEN5-specific operations.
These files are derived from the existing files/APIs,
with some changes for GEN5-specific requirements.
Also updated the mailmap file.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
 .mailmap                                      |   1 +
 drivers/common/qat/dev/qat_dev_gen5.c         | 336 ++++++++++++++++++
 .../adf_transport_access_macros_gen5.h        |  51 +++
 .../adf_transport_access_macros_gen5vf.h      |  48 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c  | 336 ++++++++++++++++++
 5 files changed, 772 insertions(+)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen5.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen5.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen5vf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c

diff --git a/.mailmap b/.mailmap
index ab0742a382..ef8e0b79e5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1027,6 +1027,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>
 Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/drivers/common/qat/dev/qat_dev_gen5.c b/drivers/common/qat/dev/qat_dev_gen5.c
new file mode 100644
index 0000000000..dc2bcd5650
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen5.c
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen5vf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <linux/kernel.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define BITS_PER_LONG		(sizeof(unsigned long) * 8)
+#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
+
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX	VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 4)
+
+#define min_t(type, x, y) ({                \
+				type __min1 = (x);                      \
+				type __min2 = (y);                      \
+				__min1 < __min2 ? __min1 : __min2; })
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+
+#define LCE_DEVICE_CAP_DYNAMIC_BANK     BIT(31)
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+
+#define SERV_TYPE_DC    BIT(0)
+#define SERV_TYPE_SYM   BIT(1)
+#define SERV_TYPE_ASYM  BIT(2)
+#define SERV_TYPE_DMA   BIT(3)
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+
+#define LCE_DEVICE_NAME_SIZE        64
+	__u8 device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+#define LCE_DEVICE_MAX_BANKS    2080
+#define LCE_DEVICE_BITMAP_SIZE  \
+	__KERNEL_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_LONG)
+
+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+#define LCE_QAT_NID_LOCAL	0x7
+#define LCE_QAT_FUNC_LOCAL	0x3ff
+#define LCE_QAT_RID_LOCAL	0xf
+#define LCE_QAT_PASID_LOCAL	0xfffff
+
+struct lce_qat_domain {
+	uint32_t nid        :3;
+	uint32_t fid        :7;
+	uint32_t ftype      :2;
+	uint32_t vfid       :13;
+	uint32_t rid        :4;
+	uint32_t vld        :1;
+	uint32_t desc_over  :1;
+	uint32_t pasid_vld  :1;
+	uint32_t pasid      :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id:   20;
+#define LCE_REQ_BUFFER_DOMAIN   1UL
+#define LCE_RES_BUFFER_DOMAIN   2UL
+#define LCE_SRC_BUFFER_DOMAIN   4UL
+#define LCE_DST_BUFFER_DOMAIN   8UL
+	uint32_t type:      4;
+	uint32_t resv:      8;
+	struct lce_qat_domain dom;
+};
+
+/* QAT GEN 5 specific macros */
+#define QAT_GEN5_BUNDLE_NUM		LCE_DEVICE_MAX_BANKS
+#define QAT_GEN5_QPS_PER_BUNDLE_NUM	1
+
+struct qat_dev_gen5_extra {
+	struct qat_qp_hw_data
+	    qp_gen5_data[QAT_GEN5_BUNDLE_NUM][QAT_GEN5_QPS_PER_BUNDLE_NUM];
+};
+
+static struct qat_pf2vf_dev qat_pf2vf_gen5 = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static int
+qat_select_valid_queue_gen5(struct qat_pci_device *qat_dev, int qp_id,
+			    enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen5_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN5_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen5_data[i][0].service_type ==
+				service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen5(struct qat_pci_device *qat_dev,
+			enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen5_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen5(qat_dev, qp_id,
+						    service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen5_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen5(struct qat_pci_device *qat_dev,
+			      enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen5_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN5_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen5_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+static int qat_dev_read_config(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen5_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/** Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen5_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+
+static int qat_dev_read_config_gen5(struct qat_pci_device *qat_dev)
+{
+	return qat_dev_read_config(qat_dev);
+}
+
+static void qat_qp_build_ring_base_gen5(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN5(queue->base_phys_addr,
+					       queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN5VF(io_addr, queue->hw_bundle_number,
+				   queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen5(const struct qat_queue *txq,
+			   void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+	    (ADF_RING_BUNDLE_SIZE_GEN5 *
+	     txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN5VF,
+			   arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen5(const struct qat_queue *txq,
+			    void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_RING_BUNDLE_SIZE_GEN5 *
+							txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN5VF,
+			   arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen5(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN5VF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+				     q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN5VF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+				     q_rx->hw_queue_number, q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen5(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN5VF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen5(struct qat_qp *qp, struct qat_queue *q,
+			   uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN5VF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen5(struct qat_pci_device *qat_dev, void *io_addr,
+		      struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen5(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen5(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen5(qp);
+	qat_qp_adf_arb_enable_gen5(&qp->tx_q, qp->mmap_bar_addr,
+				   &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen5 = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen5,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen5,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen5,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen5,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen5,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen5,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen5,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen5,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen5,
+};
+
+static int
+qat_reset_ring_pairs_gen5(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static const struct rte_mem_resource*
+qat_dev_get_transport_bar_gen5(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen5(struct rte_mem_resource **mem_resource,
+			  struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen5(void)
+{
+	return sizeof(struct qat_dev_gen5_extra);
+}
+
+static int
+qat_dev_get_slice_map_gen5(uint32_t *map __rte_unused,
+	const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen5 = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen5,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen5,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen5,
+	.qat_dev_read_config = qat_dev_read_config_gen5,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen5,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen5,
+};
+
+RTE_INIT(qat_dev_gen_5_init)
+{
+	qat_qp_hw_spec[QAT_GEN5] = &qat_qp_hw_spec_gen5;
+	qat_dev_hw_spec[QAT_GEN5] = &qat_dev_hw_spec_gen5;
+	qat_gen_config[QAT_GEN5].dev_gen = QAT_GEN5;
+	qat_gen_config[QAT_GEN5].pf2vf_dev = &qat_pf2vf_gen5;
+}
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5.h
new file mode 100644
index 0000000000..29ce6b8e60
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN5_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN5_H
+
+#include "adf_transport_access_macros.h"
+
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+#define ADF_RING_BUNDLE_SIZE_GEN5 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN5 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN5 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN5 0x1080
+
+#define BUILD_RING_BASE_ADDR_GEN5(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+#define WRITE_CSR_RING_BASE_GEN5(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);	\
+	ADF_CSR_WR(csr_base_addr,	\
+		(ADF_RING_BUNDLE_SIZE_GEN5 * bank) +			\
+		ADF_RING_CSR_RING_LBASE_GEN5 + (ring << 2),		\
+		l_base);						\
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN5 * bank) +			\
+		ADF_RING_CSR_RING_UBASE_GEN5 + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN5(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN5 * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN5 + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN5(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN5(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5vf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5vf.h
new file mode 100644
index 0000000000..5d2c6706a6
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5vf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN5VF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN5VF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen5.h"
+
+#define ADF_RING_CSR_ADDR_OFFSET_GEN5VF 0x0
+
+#define WRITE_CSR_RING_BASE_GEN5VF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN5VF, \
+		(ADF_RING_BUNDLE_SIZE_GEN5 * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN5 + (ring << 2),	\
+		l_base);	\
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN5VF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN5 * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN5 + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN5VF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN5VF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN5 * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN5 + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN5VF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN5VF, \
+		(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN5VF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN5VF, \
+		(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN5VF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN5VF, \
+		(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c
new file mode 100644
index 0000000000..1f1242c5c0
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen5[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 1), CAP_RNG(digest_size, 16, 16, 1),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 1)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+qat_sgl_add_buffer_gen5(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding %d entry failed, no empty SGL buffer", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+		nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+	void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx;
+	start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+				QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+				nr, list->buffers[nr].len,
+				list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+static int
+qat_sym_build_op_aead_gen5(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+			"GEN5 PMD only supports AES-256 AEAD mode",
+			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B of IV can be embedded directly in the descriptor,
+	 * but GCM supports only a 12B IV.
+	 */
+	if (iv_len != GCM_IV_LENGTH) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.",
+			iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+		rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset),
+		iv_len);
+
+	/* Always SGL */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags &
+		ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* A digest contiguous with the ciphertext allows a shorter SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len)
+		== digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC-SGL: 3 entries:
+	 * a) AAD
+	 * b) cipher
+	 * c) digest (only for decrypt and buffer is_NOT_adjacent)
+	 *
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen5(&cookie->qat_sgl_src, aad_phys_addr,
+			aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen5(&cookie->qat_sgl_src,
+				digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST-SGL: 2 entries:
+	 * a) cipher
+	 * b) digest (only for encrypt and buffer is_NOT_adjacent)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen5(&cookie->qat_sgl_dst,
+				digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+		sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+		rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+		rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+		digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen5(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* build request for aead */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		build_request = qat_sym_build_op_aead_gen5;
+		if (build_request)
+			ctx->build_request[proc_type] = build_request;
+		else
+			return -EINVAL;
+	}
+	return 0;
+}
+
+
+static int
+qat_sym_crypto_cap_get_gen5(struct qat_cryptodev_private *internals,
+	const char *capa_memz_name,
+	const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen5);
+	uint32_t i;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+			size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	struct rte_cryptodev_capabilities *addr =
+		(struct rte_cryptodev_capabilities *)
+		internals->capa_mz->addr;
+	const struct rte_cryptodev_capabilities *capabilities =
+		qat_sym_crypto_caps_gen5;
+	const uint32_t capa_num =
+		size / sizeof(struct rte_cryptodev_capabilities);
+	uint32_t curr_capa = 0;
+
+	for (i = 0; i < capa_num; i++) {
+		memcpy(addr + curr_capa, capabilities + i,
+			sizeof(struct rte_cryptodev_capabilities));
+		curr_capa++;
+	}
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+RTE_INIT(qat_sym_crypto_gen5_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN5].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN5].get_capabilities =
+			qat_sym_crypto_cap_get_gen5;
+	qat_sym_gen_dev_ops[QAT_GEN5].set_session =
+			qat_sym_crypto_set_session_gen5;
+	qat_sym_gen_dev_ops[QAT_GEN5].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN5].get_feature_flags =
+			qat_sym_crypto_feature_flags_get_gen1;
+#ifdef RTE_LIB_SECURITY
+	qat_sym_gen_dev_ops[QAT_GEN5].create_security_ctx =
+			qat_sym_create_security_gen1;
+#endif
+}
+
+RTE_INIT(qat_asym_crypto_gen5_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN5].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN5].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN5].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN5].set_session = NULL;
+}
-- 
2.25.1


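For readers new to this driver: the RTE_INIT constructor at the end of
qat_dev_gen5.c is the whole integration point. The common code never calls
GEN5 functions directly; it looks them up in per-generation tables indexed
by enum qat_device_gen. A minimal compilable sketch of that dispatch
pattern (simplified names; only the table-lookup shape is taken from the
driver, the rest is illustrative):

#include <errno.h>
#include <stddef.h>

enum qat_device_gen { QAT_GEN1, QAT_GEN2, QAT_GEN3, QAT_GEN4,
		      QAT_GEN5, QAT_N_GENS };

struct qp_hw_spec_funcs {
	/* one slot per hook; GEN5 fills these in qat_qp_hw_spec_gen5 */
	int (*rings_per_service)(void *qat_dev, int service);
};

/* One entry per generation, filled by RTE_INIT-style constructors. */
static struct qp_hw_spec_funcs *qp_hw_spec[QAT_N_GENS];

static int
rings_per_service(void *qat_dev, enum qat_device_gen gen, int service)
{
	struct qp_hw_spec_funcs *ops = qp_hw_spec[gen];

	/* A missing table means this generation is not supported. */
	if (ops == NULL || ops->rings_per_service == NULL)
		return -ENOTSUP;
	return ops->rings_per_service(qat_dev, service);
}

static int
gen5_rings_per_service(void *qat_dev, int service)
{
	(void)qat_dev;
	(void)service;
	return 1;	/* this patch configures a single sym ring pair */
}

static struct qp_hw_spec_funcs qp_hw_spec_gen5 = {
	.rings_per_service = gen5_rings_per_service,
};

int main(void)
{
	/* stand-in for RTE_INIT(qat_dev_gen_5_init) */
	qp_hw_spec[QAT_GEN5] = &qp_hw_spec_gen5;
	return rings_per_service(NULL, QAT_GEN5, 0) == 1 ? 0 : 1;
}

This is why the patch only has to fill qat_qp_hw_spec[QAT_GEN5] and
qat_dev_hw_spec[QAT_GEN5]; nothing else in the common ring-access code
has to change.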

* [PATCH 2/4] common/qat: update common driver to support GEN5
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
@ 2023-12-20 13:26 ` Nishikant Nayak
  2023-12-20 13:26 ` [PATCH 3/4] crypto/qat: update headers for GEN5 support Nishikant Nayak
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2023-12-20 13:26 UTC (permalink / raw)
  To: dev; +Cc: kai.ji, ciara.power, arkadiuszx.kusztal, Nishikant Nayak

Adding the GEN5-specific macros required to support
GEN5 features.
This patch also adds other macros used by the
GEN5-specific APIs.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
 drivers/common/qat/meson.build                |  2 +
 .../qat/qat_adf/adf_transport_access_macros.h |  1 +
 drivers/common/qat/qat_adf/icp_qat_fw.h       | 27 ++++++++++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    | 51 +++++++++++++++++++
 drivers/common/qat/qat_common.h               |  1 +
 drivers/common/qat/qat_device.c               |  9 ++++
 6 files changed, 91 insertions(+)

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 5c36fbb270..35389e5aba 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -82,6 +82,7 @@ sources += files(
         'dev/qat_dev_gen2.c',
         'dev/qat_dev_gen3.c',
         'dev/qat_dev_gen4.c',
+        'dev/qat_dev_gen5.c',
 )
 includes += include_directories(
         'qat_adf',
@@ -108,6 +109,7 @@ if qat_crypto
             'dev/qat_crypto_pmd_gen2.c',
             'dev/qat_crypto_pmd_gen3.c',
             'dev/qat_crypto_pmd_gen4.c',
+            'dev/qat_crypto_pmd_gen5.c',
         ]
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 12a7258c60..19bd812419 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -47,6 +47,7 @@
 #define ADF_RING_SIZE_512 0x03
 #define ADF_RING_SIZE_4K 0x06
 #define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_64K 0x0A
 #define ADF_RING_SIZE_4M 0x10
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index 3aa17ae041..b06b7ec989 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -123,6 +123,11 @@ struct icp_qat_fw_comn_resp {
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
 
+/* GEN5 specific Common Header fields */
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
+#define ICP_QAT_FW_COMN_GEN5_DESC_LAYOUT 3
+
 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
 	icp_qat_fw_comn_req_hdr_t.service_type
 
@@ -168,6 +173,12 @@ struct icp_qat_fw_comn_resp {
 	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
 	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
 
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN5(valid, desc_layout) \
+	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
+	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
+	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))
+
 #define QAT_COMN_PTR_TYPE_BITPOS 0
 #define QAT_COMN_PTR_TYPE_MASK 0x1
 #define QAT_COMN_CD_FLD_TYPE_BITPOS 1
@@ -180,10 +191,20 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_EXT_FLAGS_MASK 0x1
 #define QAT_COMN_EXT_FLAGS_USED 0x1
 
+/* GEN5 specific Common Request Flags fields */
+#define QAT_COMN_KEYBUF_USAGE_BITPOS 1
+#define QAT_COMN_KEYBUF_USAGE_MASK 0x1
+#define QAT_COMN_KEY_BUFFER_USED 1
+
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
 	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
 
+#define ICP_QAT_FW_COMN_FLAGS_BUILD_GEN5(ptr, keybuf) \
+	((((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \
+	 (((keybuf) & QAT_COMN_KEYBUF_USAGE_MASK) << \
+	   QAT_COMN_KEYBUF_USAGE_BITPOS))
+
 #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
 	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
 
@@ -249,6 +270,8 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
+#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1
 #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
 #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
 
@@ -280,6 +303,10 @@ struct icp_qat_fw_comn_resp {
 	QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
 	QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
 
+#define ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_INVALID_PARAM_BITPOS, \
+	QAT_COMN_RESP_INVALID_PARAM_MASK)
+
 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 70f0effa62..f61241d12a 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -22,11 +22,18 @@ enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };
 
+/* In GEN5 Command ID 4 corresponds to AEAD */
+#define ICP_QAT_FW_LA_CMD_AEAD 4
+
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 
+/* GEN5 Hash, HMAC and GCM Verification Status */
+#define ICP_QAT_FW_LA_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_OK
+
+
 struct icp_qat_fw_la_bulk_req {
 	struct icp_qat_fw_comn_req_hdr comn_hdr;
 	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
@@ -81,6 +88,18 @@ struct icp_qat_fw_la_bulk_req {
 #define ICP_QAT_FW_LA_PARTIAL_END 2
 #define QAT_LA_PARTIAL_BITPOS 0
 #define QAT_LA_PARTIAL_MASK 0x3
+
+/* GEN5 specific Crypto Flags fields */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
+#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
+#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
+#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
+#define ICP_QAT_FW_SYM_IV_IN_DESC_VALID 1
+#define ICP_QAT_FW_SYM_DIRECTION_BITPOS 15
+#define ICP_QAT_FW_SYM_DIRECTION_MASK 0x1
+
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -188,6 +207,23 @@ struct icp_qat_fw_la_bulk_req {
 	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
 	QAT_LA_PARTIAL_MASK)
 
+/* GEN5 specific Crypto Flags operations */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_SET(flags, val) \
+		QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS, \
+		ICP_QAT_FW_SYM_AEAD_ALGO_MASK)
+
+#define ICP_QAT_FW_SYM_IV_SIZE_SET(flags, val) \
+		QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_SIZE_BITPOS, \
+		ICP_QAT_FW_SYM_IV_SIZE_MASK)
+
+#define ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(flags, val) \
+		QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS, \
+		ICP_QAT_FW_SYM_IV_IN_DESC_MASK)
+
+#define ICP_QAT_FW_SYM_DIR_FLAG_SET(flags, val) \
+		QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_DIRECTION_BITPOS, \
+		ICP_QAT_FW_SYM_DIRECTION_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
@@ -410,4 +446,19 @@ struct icp_qat_fw_la_cipher_20_req_params {
 	uint8_t    spc_auth_res_sz;
 };
 
+struct icp_qat_fw_la_cipher_30_req_params {
+	uint32_t   spc_aad_sz;
+	uint8_t    cipher_length;
+	uint8_t    reserved[2];
+	uint8_t    spc_auth_res_sz;
+	union {
+		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint64_t cipher_IV_ptr;
+			uint64_t resrvd1;
+		} s;
+
+	} u;
+};
+
 #endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index 9411a79301..dc48a2e1ee 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -21,6 +21,7 @@ enum qat_device_gen {
 	QAT_GEN2,
 	QAT_GEN3,
 	QAT_GEN4,
+	QAT_GEN5,
 	QAT_N_GENS
 };
 
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index f55dc3c6f0..d4f5391d12 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -62,6 +62,12 @@ static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x4945),
 		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1454), /* GEN5: AVFs */
+		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1456), /* GEN5: CPF */
+		},
 		{.device_id = 0},
 };
 
@@ -199,6 +205,9 @@ pick_gen(const struct rte_pci_device *pci_dev)
 	case 0x4943:
 	case 0x4945:
 		return QAT_GEN4;
+	case 0x1454: /* QAT30: AVF */
+	case 0x1456: /* QAT30: CPF-mdev */
+		return QAT_GEN5;
 	default:
 		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
 		return QAT_N_GENS;
-- 
2.25.1


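A quick worked example of the new header-flags builder above: the macro
just packs the valid bit and the descriptor-layout field into one byte.
This assumes ICP_QAT_FW_COMN_VALID_FLAG_BITPOS is 7, as defined earlier
in icp_qat_fw.h (not visible in this hunk):

#include <assert.h>
#include <stdint.h>

#define VALID_BITPOS		7	/* ICP_QAT_FW_COMN_VALID_FLAG_BITPOS (assumed) */
#define VALID_MASK		0x1
#define DESC_LAYOUT_BITPOS	5	/* from this patch */
#define DESC_LAYOUT_MASK	0x3
#define GEN5_DESC_LAYOUT	3

static uint8_t
build_hdr_flags_gen5(uint8_t valid, uint8_t layout)
{
	return ((valid & VALID_MASK) << VALID_BITPOS) |
	       ((layout & DESC_LAYOUT_MASK) << DESC_LAYOUT_BITPOS);
}

int main(void)
{
	/* valid = 1, layout = 3 -> 0x80 | 0x60 = 0xE0 */
	assert(build_hdr_flags_gen5(1, GEN5_DESC_LAYOUT) == 0xE0);
	return 0;
}

The same packing style applies to ICP_QAT_FW_COMN_FLAGS_BUILD_GEN5, which
combines the SGL pointer type with the new key-buffer-used bit.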

* [PATCH 3/4] crypto/qat: update headers for GEN5 support
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
  2023-12-20 13:26 ` [PATCH 2/4] common/qat: update common driver to support GEN5 Nishikant Nayak
@ 2023-12-20 13:26 ` Nishikant Nayak
  2023-12-20 13:26 ` [PATCH 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2023-12-20 13:26 UTC (permalink / raw)
  To: dev
  Cc: kai.ji, ciara.power, arkadiuszx.kusztal, Nishikant Nayak,
	Akhil Goyal, Fan Zhang

This patch handles the changes required to update the common
header fields specific to GEN5. It also adds and updates the
response processing APIs per GEN5 requirements.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
 drivers/crypto/qat/qat_sym.c         | 10 ++++-
 drivers/crypto/qat/qat_sym.h         | 60 +++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 52 ++++++++++++++++++++++++
 drivers/crypto/qat/qat_sym_session.h |  5 ++-
 lib/cryptodev/rte_crypto_sym.h       |  3 ++
 5 files changed, 126 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..8fbb8831ab 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -179,8 +179,14 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
+	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+
+	if (tmp_qp->qat_dev_gen == QAT_GEN5)
+		return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response_gen5, nb_ops);
+	else
+		return qat_dequeue_op_burst(qp, (void **)ops,
+					qat_sym_process_response, nb_ops);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 71e9d5f34b..7db21fc341 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
 
+/* Digest length for GCM Algo is 16 bytes */
+#define GCM_256_DIGEST_LEN	16
+
+/* IV length for GCM algo is 12 bytes */
+#define GCM_IV_LENGTH      12
+
 struct qat_sym_session;
 
 struct qat_sym_sgl {
@@ -383,6 +389,52 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen5(void **op, uint8_t *resp,
+	void *op_cookie __rte_unused,
+	uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+		(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+		(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+		sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+				resp_msg->comn_hdr.comn_status))
+			rx_op->status =	RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+
+	*op = (void *)rx_op;
+
+	/*
+	 * Return 1: the dequeue loop only moves on to the next op
+	 * if one was ready to return to the API.
+	 */
+	return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -448,7 +500,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-	void *op_cookie __rte_unused)
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen5(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9f4f6c3d93..c97d6509b8 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen5_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		session->qat_cmd);
 		return -ENOTSUP;
 	}
+
+	if (qat_dev_gen == QAT_GEN5) {
+		qat_sym_session_init_gen5_hdr(session);
+		return 0;
+	}
+
 	qat_sym_session_finalize(session);
 
 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1082,6 +1091,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 		if (qat_sym_cd_cipher_set(session,
 				aead_xform->key.data, aead_xform->key.length))
 			return -EINVAL;
+
+		if (qat_dev_gen == QAT_GEN5) {
+			session->auth_key_length = aead_xform->key.length;
+			memcpy(session->key_array, aead_xform->key.data,
+				aead_xform->key.length);
+		}
 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1970,6 +1985,43 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen5_hdr(struct qat_sym_session *session)
+{
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+	/*
+	 * GEN5 uses a dedicated firmware command ID for AEAD, while the
+	 * cryptodev API maps AEAD onto single-pass crypto, so the session's
+	 * qat_cmd stays CIPHER. The firmware request header built here,
+	 * however, carries the AEAD command ID and AEAD mode.
+	 */
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->hdr_flags =
+	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN5(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+			ICP_QAT_FW_COMN_GEN5_DESC_LAYOUT);
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD_GEN5(QAT_COMN_PTR_TYPE_SGL,
+			QAT_COMN_KEY_BUFFER_USED);
+
+	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
+		RTE_CRYPTO_AEAD_AES_GCM_GEN5);
+	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
+		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+		ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_DECRYPT);
+	} else {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_ENCRYPT);
+	}
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9209e2e8df..821c53dfbb 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,7 +111,10 @@ struct qat_sym_session {
 	enum icp_qat_hw_auth_op auth_op;
 	enum icp_qat_hw_auth_mode auth_mode;
 	void *bpi_ctx;
-	struct qat_sym_cd cd;
+	union {
+		struct qat_sym_cd cd;
+		uint8_t key_array[32];
+	};
 	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
 	uint8_t *cd_cur_ptr;
 	phys_addr_t cd_paddr;
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 53b18b9412..e545b1ba76 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -492,6 +492,9 @@ enum rte_crypto_aead_operation {
 	/**< Verify digest and decrypt */
 };
 
+/* In GEN5, the AEAD AES-GCM algorithm has ID 0 */
+#define RTE_CRYPTO_AEAD_AES_GCM_GEN5 0
+
 /** Authentication operation name strings */
 extern const char *
 rte_crypto_aead_operation_strings[];
-- 
2.25.1


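For reference, the GEN5 response handling added to qat_sym.h reduces to
testing individual bits of comn_status. A condensed, compilable model of
the unsupported-request and invalid-parameter checks, assuming
QAT_FIELD_GET is the driver's usual shift-and-mask helper:

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of the driver's QAT_FIELD_GET helper. */
#define FIELD_GET(flags, bitpos, mask)	(((flags) >> (bitpos)) & (mask))

#define RESP_UNSUPPORTED_BITPOS		2	/* existing bit */
#define RESP_INVALID_PARAM_BITPOS	1	/* added by this patch */
#define RESP_STAT_MASK			0x1

int main(void)
{
	uint8_t comn_status = 1u << RESP_INVALID_PARAM_BITPOS;

	if (FIELD_GET(comn_status, RESP_UNSUPPORTED_BITPOS, RESP_STAT_MASK))
		puts("op status -> NOT_PROCESSED");
	else if (FIELD_GET(comn_status, RESP_INVALID_PARAM_BITPOS,
			   RESP_STAT_MASK))
		puts("op status -> INVALID_ARGS");	/* taken here */
	return 0;
}

Note that qat_sym_process_response_gen5 only consults the crypto
verification status for decrypt sessions, per its qat_dir check.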

* [PATCH 4/4] test/cryptodev: add tests for GCM with AAD
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
  2023-12-20 13:26 ` [PATCH 2/4] common/qat: update common driver to support GEN5 Nishikant Nayak
  2023-12-20 13:26 ` [PATCH 3/4] crypto/qat: update headers for GEN5 support Nishikant Nayak
@ 2023-12-20 13:26 ` Nishikant Nayak
  2024-02-23 15:17 ` [PATCH 1/4] common/qat: add files specific to GEN5 Power, Ciara
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2023-12-20 13:26 UTC (permalink / raw)
  To: dev
  Cc: kai.ji, ciara.power, arkadiuszx.kusztal, Nishikant Nayak,
	Akhil Goyal, Fan Zhang

Adding a new unit test to validate GCM with 64-byte AAD.
The new test case covers the GCM algorithm for both
encrypt and decrypt operations.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
 app/test/test_cryptodev.c                   | 48 +++++++++++++---
 app/test/test_cryptodev_aead_test_vectors.h | 62 +++++++++++++++++++++
 2 files changed, 103 insertions(+), 7 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 58561ededf..73581db8cd 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -12156,6 +12156,18 @@ test_AES_GCM_auth_decryption_test_case_256_7(void)
 	return test_authenticated_decryption(&gcm_test_case_256_7);
 }
 
+static int
+test_AES_GCM_auth_decryption_test_case_256_8(void)
+{
+	return test_authenticated_decryption(&gcm_test_case_256_8);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_8(void)
+{
+	return test_authenticated_encryption(&gcm_test_case_256_8);
+}
+
 static int
 test_AES_GCM_auth_decryption_test_case_aad_1(void)
 {
@@ -12275,10 +12287,16 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
@@ -12381,16 +12399,22 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
-		return TEST_SKIPPED;
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
 
 	/* not supported with CPU crypto and raw data-path APIs*/
 	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO ||
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
+		return TEST_SKIPPED;
 
 	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
 			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
@@ -15411,10 +15435,16 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/*
@@ -16853,6 +16883,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_encryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_encryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encryption_test_case_256_8),
 
 		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
@@ -16869,6 +16901,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_decryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_decryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_decryption_test_case_256_8),
 
 		/** AES GCM Authenticated Encryption big aad size */
 		TEST_CASE_ST(ut_setup, ut_teardown,
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 07292620a4..eadf206e4d 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -17,6 +17,16 @@ static uint8_t gcm_aad_text[MAX_AAD_LENGTH] = {
 		0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
 		0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
 
+static uint8_t gcm_aad_64B_text[MAX_AAD_LENGTH] = {
+		0xED, 0x3E, 0xA8, 0x1F, 0x74, 0xE5, 0xD1, 0x96,
+		0xA4, 0xD5, 0x4B, 0x26, 0xBB, 0x20, 0x61, 0x7B,
+		0x3B, 0x9C, 0x2A, 0x69, 0x90, 0xEF, 0xD7, 0x9A,
+		0x94, 0xC2, 0xF5, 0x86, 0xBD, 0x00, 0xF6, 0xEA,
+		0x0B, 0x14, 0x24, 0xF2, 0x08, 0x67, 0x42, 0x3A,
+		0xB5, 0xB8, 0x32, 0x97, 0xB5, 0x99, 0x69, 0x75,
+		0x60, 0x00, 0x8F, 0xF7, 0x6F, 0x16, 0x52, 0x66,
+		0xF1, 0xA9, 0x38, 0xFD, 0xB0, 0x61, 0x60, 0xB5 };
+
 static uint8_t ccm_aad_test_1[8] = {
 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
 };
@@ -1736,6 +1746,58 @@ static const struct aead_test_data gcm_test_case_256_7 = {
 	}
 };
 
+static const struct aead_test_data gcm_test_case_256_8 = {
+	.algo = RTE_CRYPTO_AEAD_AES_GCM,
+	.key = {
+		.data = {
+			0xD8, 0xFD, 0x8F, 0x5A, 0x13, 0x7B, 0x05, 0x2C,
+			0xA4, 0x64, 0x7A, 0xDD, 0x1E, 0x9A, 0x68, 0x33,
+			0x04, 0x70, 0xE8, 0x1E, 0x42, 0x84, 0x64, 0xD2,
+			0x23, 0xA1, 0x6A, 0x0A, 0x05, 0x7B, 0x90, 0xDE},
+		.len = 32
+	},
+	.iv = {
+		.data = {
+			0x8D, 0xDF, 0xB8, 0x7F, 0xD0, 0x79, 0x77, 0x55,
+			0xD5, 0x48, 0x03, 0x05},
+		.len = 12
+	},
+	.aad = {
+		.data = gcm_aad_64B_text,
+		.len = 64
+	},
+	.plaintext = {
+		.data = {
+			0x4D, 0xBC, 0x2C, 0x7F, 0x25, 0x1F, 0x07, 0x25,
+			0x54, 0x8C, 0x43, 0xDB, 0xD8, 0x06, 0x9F, 0xBF,
+			0xCA, 0x60, 0xF4, 0xEF, 0x13, 0x87, 0xE8, 0x2F,
+			0x4D, 0x9D, 0x1D, 0x87, 0x9F, 0x91, 0x79, 0x7E,
+			0x3E, 0x98, 0xA3, 0x63, 0xC6, 0xFE, 0xDB, 0x35,
+			0x96, 0x59, 0xB2, 0x0C, 0x80, 0x96, 0x70, 0x07,
+			0x87, 0x42, 0xAB, 0x4F, 0x31, 0x73, 0xC4, 0xF9,
+			0xB0, 0x1E, 0xF1, 0xBC, 0x7D, 0x45, 0xE5, 0xF3},
+		.len = 64
+	},
+	.ciphertext = {
+	    .data = {
+			0x21, 0xFA, 0x59, 0x4F, 0x1F, 0x6B, 0x19, 0xC2,
+			0x68, 0xBC, 0x05, 0x93, 0x4E, 0x48, 0x6C, 0x5B,
+			0x0B, 0x7A, 0x43, 0xB7, 0x60, 0x8E, 0x00, 0xC4,
+			0xAB, 0x14, 0x6B, 0xCC, 0xA1, 0x27, 0x6A, 0xDE,
+			0x8E, 0xB6, 0x98, 0xBB, 0x4F, 0xD0, 0x6F, 0x30,
+			0x0F, 0x04, 0xA8, 0x5B, 0xDC, 0xD8, 0xE8, 0x8A,
+			0x73, 0xD9, 0xB8, 0x60, 0x7C, 0xE4, 0x32, 0x4C,
+			0x3A, 0x0B, 0xC2, 0x82, 0xDA, 0x88, 0x17, 0x69},
+	    .len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x3B, 0x80, 0x83, 0x72, 0xE5, 0x1B, 0x94, 0x15,
+			0x75, 0xC8, 0x62, 0xBC, 0xA1, 0x66, 0x91, 0x45},
+		.len = 16
+	}
+};
+
 /** variable AAD AES-GCM-128 Test Vectors */
 static const struct aead_test_data gcm_test_case_aad_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
-- 
2.25.1


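For context on what the new vector exercises at the API level: AES-256-GCM
with 64-byte AAD is configured through the standard cryptodev AEAD
transform. A sketch of the xform the test harness effectively builds for
gcm_test_case_256_8 — the struct and field names are from
rte_crypto_sym.h, while 'key' and IV_OFFSET below are placeholders, not
values from the patch:

#include <rte_crypto_sym.h>

static const uint8_t key[32];	/* placeholder for the vector's key bytes */
#define IV_OFFSET 0		/* placeholder; tests place the IV after the op */

static const struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = key, .length = 32 },	/* AES-256 */
		.iv = { .offset = IV_OFFSET, .length = 12 },
		.digest_length = 16,
		.aad_length = 64,	/* the new 64-byte AAD case */
	},
};

The enqueue/dequeue plumbing is unchanged; the patch only grows the
capability check to rte_cryptodev_sym_capability_check_aead() so that
unsupported key/digest/AAD/IV combinations skip instead of failing.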

* RE: [PATCH 1/4] common/qat: add files specific to GEN5
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
                   ` (2 preceding siblings ...)
  2023-12-20 13:26 ` [PATCH 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
@ 2024-02-23 15:17 ` Power, Ciara
  2024-02-26 13:03 ` [PATCH v2 0/4] add QAT GEN LCE device Nishikant Nayak
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 47+ messages in thread
From: Power, Ciara @ 2024-02-23 15:17 UTC (permalink / raw)
  To: Nayak, Nishikanta, dev
  Cc: Ji, Kai, Kusztal, ArkadiuszX, Thomas Monjalon, Burakov, Anatoly,
	Akhil Goyal



> -----Original Message-----
> From: Nayak, Nishikanta <nishikanta.nayak@intel.com>
> Sent: Wednesday, December 20, 2023 1:26 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>; Power, Ciara <ciara.power@intel.com>; Kusztal,
> ArkadiuszX <arkadiuszx.kusztal@intel.com>; Nayak, Nishikanta
> <nishikanta.nayak@intel.com>; Thomas Monjalon <thomas@monjalon.net>;
> Burakov, Anatoly <anatoly.burakov@intel.com>
> Subject: [PATCH 1/4] common/qat: add files specific to GEN5
> 
> Adding GEN5 files for handling GEN5-specific operations.
> These files are derived from the existing files/APIs, with some changes
> for GEN5-specific requirements. Also updated the mailmap file.
> 
> Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
> ---

A note on this one:
We will send a v2 of this patchset soon, renaming the device to GEN_LCE instead of GEN5.

This avoids clashing with the patch I have just sent for another QAT device, which is named GEN5.
(https://patches.dpdk.org/project/dpdk/patch/20240223151255.3310490-5-ciara.power@intel.com/)

Thanks,
Ciara


* [PATCH v2 0/4] add QAT GEN LCE device
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
                   ` (3 preceding siblings ...)
  2024-02-23 15:17 ` [PATCH 1/4] common/qat: add files specific to GEN5 Power, Ciara
@ 2024-02-26 13:03 ` Nishikant Nayak
  2024-02-26 13:03   ` [PATCH v2 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
                     ` (3 more replies)
  2024-02-27  9:35 ` [PATCH v3 0/4] add new QAT gen3 and gen5 Nishikant Nayak
                   ` (5 subsequent siblings)
  10 siblings, 4 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-26 13:03 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patchset adds a new QAT LCE device.
The device currently only supports symmetric crypto,
and only the AES-GCM algorithm.

v2:
   - Renamed device from GEN 5 to GEN LCE.
   - Removed unused code.
   - Updated macro names.

Nishikant Nayak (4):
  common/qat: add files specific to GEN LCE
  common/qat: update common driver to support GEN LCE
  crypto/qat: update headers for GEN LCE support
  test/cryptodev: add tests for GCM with AAD

 .mailmap                                      |   1 +
 app/test/test_cryptodev.c                     |  48 ++-
 app/test/test_cryptodev_aead_test_vectors.h   |  62 ++++
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   9 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.c                  |  16 +-
 drivers/crypto/qat/qat_sym.h                  |  66 +++-
 drivers/crypto/qat/qat_sym_session.c          |  62 +++-
 drivers/crypto/qat/qat_sym_session.h          |  10 +-
 17 files changed, 1089 insertions(+), 16 deletions(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

-- 
2.25.1



* [PATCH v2 1/4] common/qat: add files specific to GEN LCE
  2024-02-26 13:03 ` [PATCH v2 0/4] add QAT GEN LCE device Nishikant Nayak
@ 2024-02-26 13:03   ` Nishikant Nayak
  2024-02-26 13:03   ` [PATCH v2 2/4] common/qat: update common driver to support " Nishikant Nayak
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-26 13:03 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Thomas Monjalon, Anatoly Burakov

Adding GEN LCE files for handling GEN LCE specific operations.
These files are inherited from the existing files/APIs,
with some changes specific to GEN LCE requirements.
Also updated the mailmap file.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
---
 .mailmap                                      |   1 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  14 +
 drivers/common/qat/qat_common.h               |   1 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.h                  |   6 +
 9 files changed, 758 insertions(+)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

diff --git a/.mailmap b/.mailmap
index 58cca13ef6..8008e5a899 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1036,6 +1036,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>
 Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/drivers/common/qat/dev/qat_dev_gen_lce.c b/drivers/common/qat/dev/qat_dev_gen_lce.c
new file mode 100644
index 0000000000..4cef0b8be2
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen_lce.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen_lcevf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <linux/kernel.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
+
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX	VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 4)
+#define LCE_DEVICE_NAME_SIZE			64
+#define LCE_DEVICE_MAX_BANKS			2080
+#define LCE_DEVICE_BITMAP_SIZE  \
+	__KERNEL_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)
+
+/* QAT GEN_LCE specific macros */
+#define QAT_GEN_LCE_BUNDLE_NUM		LCE_DEVICE_MAX_BANKS
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM	1
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+	__u8 device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+struct lce_qat_domain {
+	uint32_t nid        :3;
+	uint32_t fid        :7;
+	uint32_t ftype      :2;
+	uint32_t vfid       :13;
+	uint32_t rid        :4;
+	uint32_t vld        :1;
+	uint32_t desc_over  :1;
+	uint32_t pasid_vld  :1;
+	uint32_t pasid      :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id:   20;
+	uint32_t type:      4;
+	uint32_t resv:      8;
+	struct lce_qat_domain dom;
+};
+
+struct qat_dev_gen_lce_extra {
+	struct qat_qp_hw_data
+	    qp_gen_lce_data[QAT_GEN_LCE_BUNDLE_NUM][QAT_GEN4_QPS_PER_BUNDLE_NUM];
+};
+
+static struct qat_pf2vf_dev qat_pf2vf_gen_lce = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static int
+qat_select_valid_queue_gen_lce(struct qat_pci_device *qat_dev, int qp_id,
+			    enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN_LCE_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen_lce_data[i][0].service_type ==
+				service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen_lce(struct qat_pci_device *qat_dev,
+			enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen_lce(qat_dev, qp_id,
+								service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen_lce_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen_lce(struct qat_pci_device *qat_dev,
+			      enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN_LCE_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+static int qat_dev_read_config_gen_lce(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/** Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen_lce_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+static void qat_qp_build_ring_base_gen_lce(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN_LCE(queue->base_phys_addr,
+					       queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN_LCEVF(io_addr, queue->hw_bundle_number,
+				   queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen_lce(const struct qat_queue *txq,
+			   void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+	    (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+	     txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen_lce(const struct qat_queue *txq,
+			    void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+							txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen_lce(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+				     q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+				     q_rx->hw_queue_number, q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen_lce(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen_lce(struct qat_qp *qp, struct qat_queue *q,
+			   uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen_lce(struct qat_pci_device *qat_dev, void *io_addr,
+		      struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen_lce(qp);
+	qat_qp_adf_arb_enable_gen_lce(&qp->tx_q, qp->mmap_bar_addr,
+				   &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen_lce = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen_lce,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen_lce,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen_lce,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen_lce,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen_lce,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen_lce,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen_lce,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen_lce,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen_lce,
+};
+
+static int
+qat_reset_ring_pairs_gen_lce(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static const struct rte_mem_resource*
+qat_dev_get_transport_bar_gen_lce(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen_lce(struct rte_mem_resource **mem_resource,
+			  struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen_lce(void)
+{
+	return sizeof(struct qat_dev_gen_lce_extra);
+}
+
+static int
+qat_dev_get_slice_map_gen_lce(uint32_t *map __rte_unused,
+	const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen_lce = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen_lce,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen_lce,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen_lce,
+	.qat_dev_read_config = qat_dev_read_config_gen_lce,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen_lce,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen_lce,
+};
+
+RTE_INIT(qat_dev_gen_lce_init)
+{
+	qat_qp_hw_spec[QAT_GEN_LCE] = &qat_qp_hw_spec_gen_lce;
+	qat_dev_hw_spec[QAT_GEN_LCE] = &qat_dev_hw_spec_gen_lce;
+	qat_gen_config[QAT_GEN_LCE].dev_gen = QAT_GEN_LCE;
+	qat_gen_config[QAT_GEN_LCE].pf2vf_dev = &qat_pf2vf_gen_lce;
+}
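
The RTE_INIT constructor above is the per-generation registration hook:
each qat_dev_gen*.c file fills in the common driver's dispatch tables at
load time, and the common code indexes them by the generation enum. A
standalone sketch of the same pattern with hypothetical names (the real
tables are qat_qp_hw_spec[] and qat_dev_hw_spec[]):

#include <rte_common.h>

/* Hypothetical ops table mirroring the registration done by
 * qat_dev_gen_lce_init(); not the driver's actual types. */
struct gen_ops {
	int (*read_config)(void *qat_dev);
};

static int
lce_read_config(void *qat_dev)
{
	(void)qat_dev;
	return 0;
}

static struct gen_ops lce_ops = { .read_config = lce_read_config };

enum { MY_GEN1, MY_GEN_LCE, MY_N_GENS };	/* hypothetical generation ids */
static struct gen_ops *gen_ops_tbl[MY_N_GENS];

RTE_INIT(lce_ops_register)
{
	/* runs at shared-object load, before main() */
	gen_ops_tbl[MY_GEN_LCE] = &lce_ops;
}
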
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 62abcb6fe3..bc7c3e5b85 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -82,6 +82,7 @@ sources += files(
         'dev/qat_dev_gen2.c',
         'dev/qat_dev_gen3.c',
         'dev/qat_dev_gen4.c',
+        'dev/qat_dev_gen_lce.c',
 )
 includes += include_directories(
         'qat_adf',
@@ -108,6 +109,7 @@ if qat_crypto
             'dev/qat_crypto_pmd_gen2.c',
             'dev/qat_crypto_pmd_gen3.c',
             'dev/qat_crypto_pmd_gen4.c',
+            'dev/qat_crypto_pmd_gen_lce.c',
         ]
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
new file mode 100644
index 0000000000..c9df8f5dd2
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+
+#include "adf_transport_access_macros.h"
+
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+#define ADF_RING_BUNDLE_SIZE_GEN_LCE 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN_LCE 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN_LCE 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN_LCE 0x1080
+
+#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+#define WRITE_CSR_RING_BASE_GEN_LCE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);	\
+	ADF_CSR_WR(csr_base_addr,	\
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),		\
+		l_base);						\
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
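
A quick worked example of the base-address arithmetic above, since it is
easy to misread: BUILD_RING_BASE_ADDR_GEN_LCE aligns the ring IOVA down
according to the ring-size encoding, and WRITE_CSR_RING_BASE_GEN_LCE then
programs the result as two 32-bit CSR writes. A standalone sketch with
hypothetical values (no hardware access):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t iova = 0x12345678A40ULL;	/* hypothetical ring IOVA */
	uint32_t size = 0x0A;			/* e.g. the 64K ring-size encoding */

	/* BUILD_RING_BASE_ADDR_GEN_LCE: drop low bits per ring size */
	uint64_t base = (((iova >> 6) & (0xFFFFFFFFFFFFFFFFULL << size)) << 6);

	/* WRITE_CSR_RING_BASE_GEN_LCE: split into LBASE/UBASE halves */
	uint32_t l_base = (uint32_t)(base & 0xFFFFFFFF);
	uint32_t u_base = (uint32_t)(base >> 32);

	printf("base=0x%" PRIx64 " lbase=0x%" PRIx32 " ubase=0x%" PRIx32 "\n",
		base, l_base, u_base);
	return 0;
}
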
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
new file mode 100644
index 0000000000..3c7232de12
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen_lce.h"
+
+#define ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF 0x0
+
+#define WRITE_CSR_RING_BASE_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),	\
+		l_base);	\
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN_LCEVF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 70f0effa62..215b291b74 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -410,4 +410,18 @@ struct icp_qat_fw_la_cipher_20_req_params {
 	uint8_t    spc_auth_res_sz;
 };
 
+struct icp_qat_fw_la_cipher_30_req_params {
+		uint32_t   spc_aad_sz;
+		uint8_t    cipher_length;
+		uint8_t    reserved[2];
+		uint8_t    spc_auth_res_sz;
+		union {
+				uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+				struct {
+						uint64_t cipher_IV_ptr;
+						uint64_t resrvd1;
+			} s;
+
+		} u;
+};
 #endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index 9411a79301..642e009f28 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -21,6 +21,7 @@ enum qat_device_gen {
 	QAT_GEN2,
 	QAT_GEN3,
 	QAT_GEN4,
+	QAT_GEN_LCE,
 	QAT_N_GENS
 };
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
new file mode 100644
index 0000000000..cdd852600d
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding %d entry failed, no empty SGL buffer", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+		nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+	void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx;
+	start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+				QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+				nr, list->buffers[nr].len,
+				list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+static int
+qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+			"GEN_LCE PMD only supports AES-256 AEAD mode",
+			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B of IV can be directly embedded in the descriptor.
+	 * GCM supports only a 12B IV for GEN LCE.
+	 */
+	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.",
+			iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+		rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset),
+		iv_len);
+
+	/* Always SGL */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags &
+		ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* Knowing digest is contiguous to cipher-text helps optimizing SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len)
+		== digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC-SGL: 3 entries:
+	 * a) AAD
+	 * b) cipher
+	 * c) digest (only for decrypt and buffer is_NOT_adjacent)
+	 *
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr,
+			aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
+				digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST-SGL: 2 entries:
+	 * a) cipher
+	 * b) digest (only for encrypt and buffer is_NOT_adjacent)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
+				digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+		sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+		rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+		rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+		digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* build request for aead */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		build_request = qat_sym_build_op_aead_gen_lce;
+		ctx->build_request[proc_type] = build_request;
+	}
+	return 0;
+}
+
+
+static int
+qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
+	const char *capa_memz_name,
+	const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
+	uint32_t i;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+			size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	struct rte_cryptodev_capabilities *addr =
+		(struct rte_cryptodev_capabilities *)
+		internals->capa_mz->addr;
+	const struct rte_cryptodev_capabilities *capabilities =
+		qat_sym_crypto_caps_gen_lce;
+	const uint32_t capa_num =
+		size / sizeof(struct rte_cryptodev_capabilities);
+	uint32_t curr_capa = 0;
+
+	for (i = 0; i < capa_num; i++) {
+		memcpy(addr + curr_capa, capabilities + i,
+			sizeof(struct rte_cryptodev_capabilities));
+		curr_capa++;
+	}
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+RTE_INIT(qat_sym_crypto_gen_lce_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities =
+			qat_sym_crypto_cap_get_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session =
+			qat_sym_crypto_set_session_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags =
+			qat_sym_crypto_feature_flags_get_gen1;
+}
+
+RTE_INIT(qat_asym_crypto_gen_lce_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f976009bf2..f2f197d050 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
 
+/* Digest length for GCM Algo is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for GCM algo is 12 bytes */
+#define GCM_IV_LENGTH_GEN_LCE 12
+
 struct qat_sym_session;
 
 struct qat_sym_sgl {
-- 
2.25.1
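
One detail in qat_crypto_pmd_gen_lce.c above worth calling out: the
request builder checks whether the digest sits immediately after the
ciphertext in physical memory and, if so, folds both into a single SGL
span instead of adding a separate digest entry. The test reduces to one
address comparison; a minimal sketch with flat addresses standing in for
the mbuf IOVA lookup:

#include <stdbool.h>
#include <stdint.h>

/* Equivalent of the rte_pktmbuf_iova_offset() comparison in
 * qat_sym_build_op_aead_gen_lce, using flat hypothetical addresses. */
static bool
digest_adjacent(uint64_t data_iova, uint32_t cipher_ofs,
	uint32_t cipher_len, uint64_t digest_iova)
{
	/* adjacent when the digest starts exactly where ciphertext ends */
	return data_iova + cipher_ofs + cipher_len == digest_iova;
}

When this holds for a decrypt op, the driver asks the mbuf walk for
cipher_len + digest_len bytes in one pass, saving an SGL entry.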


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v2 2/4] common/qat: update common driver to support GEN LCE
  2024-02-26 13:03 ` [PATCH v2 0/4] add QAT GEN LCE device Nishikant Nayak
  2024-02-26 13:03   ` [PATCH v2 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
@ 2024-02-26 13:03   ` Nishikant Nayak
  2024-02-26 13:03   ` [PATCH v2 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
  2024-02-26 13:03   ` [PATCH v2 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-26 13:03 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

Adding GEN LCE specific macros which are required for
supporting GEN LCE features.
This patch also adds other macros which are used by GEN LCE
specific APIs.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Fixed code formatting
---
 .../qat/qat_adf/adf_transport_access_macros.h |  1 +
 drivers/common/qat/qat_adf/icp_qat_fw.h       | 34 ++++++++++++++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    | 45 ++++++++++++++++++-
 drivers/common/qat/qat_device.c               |  9 ++++
 4 files changed, 88 insertions(+), 1 deletion(-)

diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 12a7258c60..19bd812419 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -47,6 +47,7 @@
 #define ADF_RING_SIZE_512 0x03
 #define ADF_RING_SIZE_4K 0x06
 #define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_64K 0x0A
 #define ADF_RING_SIZE_4M 0x10
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index 3aa17ae041..b78158e01d 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -57,6 +57,12 @@ struct icp_qat_fw_comn_req_hdr_cd_pars {
 	} u;
 };
 
+struct lce_key_buff_desc {
+	uint64_t keybuff;
+	uint32_t keybuff_resrvd1;
+	uint32_t keybuff_resrvd2;
+};
+
 struct icp_qat_fw_comn_req_mid {
 	uint64_t opaque_data;
 	uint64_t src_data_addr;
@@ -123,6 +129,12 @@ struct icp_qat_fw_comn_resp {
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
 
+/* GEN_LCE specific Common Header fields */
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
+#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3
+#define ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR 0
+
 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
 	icp_qat_fw_comn_req_hdr_t.service_type
 
@@ -168,6 +180,12 @@ struct icp_qat_fw_comn_resp {
 	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
 	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
 
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
+	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
+	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
+	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))
+
 #define QAT_COMN_PTR_TYPE_BITPOS 0
 #define QAT_COMN_PTR_TYPE_MASK 0x1
 #define QAT_COMN_CD_FLD_TYPE_BITPOS 1
@@ -180,10 +198,20 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_EXT_FLAGS_MASK 0x1
 #define QAT_COMN_EXT_FLAGS_USED 0x1
 
+/* GEN_LCE specific Common Request Flags fields */
+#define QAT_COMN_KEYBUF_USAGE_BITPOS 1
+#define QAT_COMN_KEYBUF_USAGE_MASK 0x1
+#define QAT_COMN_KEY_BUFFER_USED 1
+
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
 	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
 
+#define ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(ptr, keybuf) \
+	((((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \
+	 (((keybuf) & QAT_COMN_PTR_TYPE_MASK) << \
+	   QAT_COMN_KEYBUF_USAGE_BITPOS))
+
 #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
 	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
 
@@ -249,6 +277,8 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
+#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1
 #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
 #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
 
@@ -280,6 +310,10 @@ struct icp_qat_fw_comn_resp {
 	QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
 	QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
 
+#define ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_INVALID_PARAM_BITPOS, \
+	QAT_COMN_RESP_INVALID_PARAM_MASK)
+
 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 215b291b74..eba9f96685 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -22,14 +22,24 @@ enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };
 
+/* In GEN_LCE Command ID 4 corresponds to AEAD */
+#define ICP_QAT_FW_LA_CMD_AEAD 4
+
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 
+/* GEN_LCE Hash, HMAC and GCM Verification Status */
+#define ICP_QAT_FW_LA_VER_STATUS_FAIL ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR
+
+
 struct icp_qat_fw_la_bulk_req {
 	struct icp_qat_fw_comn_req_hdr comn_hdr;
-	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	union {
+		struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+		struct lce_key_buff_desc key_buff;
+	};
 	struct icp_qat_fw_comn_req_mid comn_mid;
 	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
 	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
@@ -81,6 +91,21 @@ struct icp_qat_fw_la_bulk_req {
 #define ICP_QAT_FW_LA_PARTIAL_END 2
 #define QAT_LA_PARTIAL_BITPOS 0
 #define QAT_LA_PARTIAL_MASK 0x3
+
+/* GEN_LCE specific Crypto Flags fields */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
+#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
+#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
+#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
+#define ICP_QAT_FW_SYM_IV_IN_DESC_VALID 1
+#define ICP_QAT_FW_SYM_DIRECTION_BITPOS 15
+#define ICP_QAT_FW_SYM_DIRECTION_MASK 0x1
+
+/* In GEN_LCE AEAD AES GCM Algorithm has ID 0 */
+#define QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE 0
+
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -188,6 +213,23 @@ struct icp_qat_fw_la_bulk_req {
 	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
 	QAT_LA_PARTIAL_MASK)
 
+/* GEN_LCE specific Crypto Flags operations */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS, \
+	ICP_QAT_FW_SYM_AEAD_ALGO_MASK)
+
+#define ICP_QAT_FW_SYM_IV_SIZE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_SIZE_BITPOS, \
+	ICP_QAT_FW_SYM_IV_SIZE_MASK)
+
+#define ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS, \
+	ICP_QAT_FW_SYM_IV_IN_DESC_MASK)
+
+#define ICP_QAT_FW_SYM_DIR_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_DIRECTION_BITPOS, \
+	ICP_QAT_FW_SYM_DIRECTION_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
@@ -424,4 +466,5 @@ struct icp_qat_fw_la_cipher_30_req_params {
 
 		} u;
 };
+
 #endif
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index f55dc3c6f0..18e652e393 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -62,6 +62,12 @@ static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x4945),
 		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1454),
+		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1456),
+		},
 		{.device_id = 0},
 };
 
@@ -199,6 +205,9 @@ pick_gen(const struct rte_pci_device *pci_dev)
 	case 0x4943:
 	case 0x4945:
 		return QAT_GEN4;
+	case 0x1454:
+	case 0x1456:
+		return QAT_GEN_LCE;
 	default:
 		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
 		return QAT_N_GENS;
-- 
2.25.1
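
To make the new header-flags macro concrete: with the VALID flag set
(bit 7 in the existing common header layout -- an assumption here, as
that bitpos is defined elsewhere in icp_qat_fw.h) and the GEN_LCE
descriptor-layout value 3 at bits 6:5, the macro yields 0xE0. A
standalone recomputation:

#include <stdint.h>
#include <stdio.h>

/* DESC_LAYOUT values are from this patch; VALID_FLAG_BITPOS = 7 is
 * assumed from the pre-existing icp_qat_fw.h definitions. */
#define VALID_FLAG_BITPOS	7
#define VALID_FLAG_MASK		0x1
#define DESC_LAYOUT_BITPOS	5
#define DESC_LAYOUT_MASK	0x3
#define GEN_LCE_DESC_LAYOUT	3

int main(void)
{
	uint8_t hdr_flags = ((1 & VALID_FLAG_MASK) << VALID_FLAG_BITPOS) |
		((GEN_LCE_DESC_LAYOUT & DESC_LAYOUT_MASK) << DESC_LAYOUT_BITPOS);

	printf("hdr_flags = 0x%X\n", hdr_flags);	/* prints 0xE0 */
	return 0;
}
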


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v2 3/4] crypto/qat: update headers for GEN LCE support
  2024-02-26 13:03 ` [PATCH v2 0/4] add QAT GEN LCE device Nishikant Nayak
  2024-02-26 13:03   ` [PATCH v2 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
  2024-02-26 13:03   ` [PATCH v2 2/4] common/qat: update common driver to support " Nishikant Nayak
@ 2024-02-26 13:03   ` Nishikant Nayak
  2024-02-26 13:03   ` [PATCH v2 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-26 13:03 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patch handles the changes required for updating the common
header fields specific to GEN LCE. It also adds/updates the response
processing APIs based on GEN LCE requirements.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Added GEN LCE specific API for deque burst.
    - Fixed code formatting.
---
 drivers/crypto/qat/qat_sym.c         | 16 ++++++-
 drivers/crypto/qat/qat_sym.h         | 60 ++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 62 +++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.h | 10 ++++-
 4 files changed, 140 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..439a3fc00b 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -180,7 +180,15 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
+			qat_sym_process_response, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+							uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+			qat_sym_process_response_gen_lce, nb_ops);
 }
 
 int
@@ -200,6 +208,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct rte_cryptodev *cryptodev;
 	struct qat_cryptodev_private *internals;
+	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
 	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
 		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 
@@ -249,7 +258,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
 	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+	if (qat_dev_gen == QAT_GEN_LCE)
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
+	else
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
 	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f2f197d050..3461113c13 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -90,7 +90,7 @@
 /*
  * Maximum number of SGL entries
  */
-#define QAT_SYM_SGL_MAX_NUMBER	16
+#define QAT_SYM_SGL_MAX_NUMBER 16
 
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
@@ -142,6 +142,10 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 #ifdef RTE_QAT_OPENSSL
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -390,6 +394,52 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen_lce(void **op, uint8_t *resp,
+	void *op_cookie __rte_unused,
+	uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+		(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+		(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+		sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+				resp_msg->comn_hdr.comn_status))
+			rx_op->status =	RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+
+	*op = (void *)rx_op;
+
+	/*
+	 * Return 1, as the dequeue op only moves on to the next op
+	 * if one was ready to return to the API.
+	 */
+	return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -455,7 +505,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-	void *op_cookie __rte_unused)
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9f4f6c3d93..8f50b61365 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		session->qat_cmd);
 		return -ENOTSUP;
 	}
+
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		qat_sym_session_init_gen_lce_hdr(session);
+		return 0;
+	}
+
 	qat_sym_session_finalize(session);
 
 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1016,6 +1025,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 			dev->data->dev_private;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
+
+		key_buff->keybuff = session->key_paddr;
+	}
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -1079,9 +1094,15 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	}
 
 	if (session->is_single_pass) {
-		if (qat_sym_cd_cipher_set(session,
+		if (qat_dev_gen != QAT_GEN_LCE) {
+			if (qat_sym_cd_cipher_set(session,
 				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
+				return -EINVAL;
+		} else {
+			session->auth_key_length = aead_xform->key.length;
+			memcpy(session->key_array, aead_xform->key.data,
+							aead_xform->key.length);
+		}
 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1970,6 +1991,43 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
+{
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+	/*
+	 * GEN_LCE specifies separate command id for AEAD operations but Cryptodev
+	 * API processes AEAD operations as Single pass Crypto operations.
+	 * Hence even for GEN_LCE, Session Algo Command ID is CIPHER.
+	 * Note, however Session Algo Mode is AEAD.
+	 */
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->hdr_flags =
+	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
+			QAT_COMN_KEY_BUFFER_USED);
+
+	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
+		QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
+	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
+		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+		ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_DECRYPT);
+	} else {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_ENCRYPT);
+	}
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9209e2e8df..958af03405 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,10 +111,16 @@ struct qat_sym_session {
 	enum icp_qat_hw_auth_op auth_op;
 	enum icp_qat_hw_auth_mode auth_mode;
 	void *bpi_ctx;
-	struct qat_sym_cd cd;
+	union {
+		struct qat_sym_cd cd;
+		uint8_t key_array[32];
+	};
 	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
 	uint8_t *cd_cur_ptr;
-	phys_addr_t cd_paddr;
+	union {
+		phys_addr_t cd_paddr;
+		phys_addr_t key_paddr;
+	};
 	phys_addr_t prefix_paddr;
 	struct icp_qat_fw_la_bulk_req fw_req;
 	uint8_t aad_len;
-- 
2.25.1
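
The GEN LCE response path added above boils down to testing individual
status bits in the common response header: unsupported-request at bit 2,
the new invalid-parameter flag at bit 1 (from patch 2/4) and, for
decrypt, the ICV-verification result. A condensed sketch of that decode,
with a plain status byte standing in for the response descriptor:

#include <stdint.h>

enum op_status { OP_SUCCESS, OP_NOT_PROCESSED, OP_INVALID_ARGS, OP_AUTH_FAILED };

/* Bit positions follow the icp_qat_fw.h additions in patch 2/4. */
#define UNSUPPORTED_REQUEST_BITPOS	2
#define INVALID_PARAM_BITPOS		1

static enum op_status
decode_status_gen_lce(uint8_t comn_status, int is_decrypt, int icv_failed)
{
	if ((comn_status >> UNSUPPORTED_REQUEST_BITPOS) & 0x1)
		return OP_NOT_PROCESSED;
	if ((comn_status >> INVALID_PARAM_BITPOS) & 0x1)
		return OP_INVALID_ARGS;
	if (is_decrypt && icv_failed)	/* GEN LCE reports failure as flag 0 */
		return OP_AUTH_FAILED;
	return OP_SUCCESS;
}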


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v2 4/4] test/cryptodev: add tests for GCM with AAD
  2024-02-26 13:03 ` [PATCH v2 0/4] add QAT GEN LCE device Nishikant Nayak
                     ` (2 preceding siblings ...)
  2024-02-26 13:03   ` [PATCH v2 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
@ 2024-02-26 13:03   ` Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-26 13:03 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Akhil Goyal, Fan Zhang

Adding a new unit test for validating the features
added as part of GCM with 64-byte AAD.
The new test case exercises the GCM algorithm for both
encrypt and decrypt operations.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Removed unused code.
    - Added one new unit test, AAD with GCM for GEN LCE.
---
 app/test/test_cryptodev.c                   | 48 +++++++++++++---
 app/test/test_cryptodev_aead_test_vectors.h | 62 +++++++++++++++++++++
 2 files changed, 103 insertions(+), 7 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 38a65aa88f..edd23731f7 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -12494,6 +12494,18 @@ test_AES_GCM_auth_decryption_test_case_256_7(void)
 	return test_authenticated_decryption(&gcm_test_case_256_7);
 }
 
+static int
+test_AES_GCM_auth_decryption_test_case_256_8(void)
+{
+	return test_authenticated_decryption(&gcm_test_case_256_8);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_8(void)
+{
+	return test_authenticated_encryption(&gcm_test_case_256_8);
+}
+
 static int
 test_AES_GCM_auth_decryption_test_case_aad_1(void)
 {
@@ -12613,10 +12625,16 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
@@ -12719,16 +12737,22 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
-		return TEST_SKIPPED;
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
 
 	/* not supported with CPU crypto and raw data-path APIs*/
 	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO ||
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
+		return TEST_SKIPPED;
 
 	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
 			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
@@ -15749,10 +15773,16 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/*
@@ -17392,6 +17422,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_encryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_encryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encryption_test_case_256_8),
 
 		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
@@ -17408,6 +17440,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_decryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_decryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_decryption_test_case_256_8),
 
 		/** AES GCM Authenticated Encryption big aad size */
 		TEST_CASE_ST(ut_setup, ut_teardown,
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 07292620a4..eadf206e4d 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -17,6 +17,16 @@ static uint8_t gcm_aad_text[MAX_AAD_LENGTH] = {
 		0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
 		0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
 
+static uint8_t gcm_aad_64B_text[MAX_AAD_LENGTH] = {
+		0xED, 0x3E, 0xA8, 0x1F, 0x74, 0xE5, 0xD1, 0x96,
+		0xA4, 0xD5, 0x4B, 0x26, 0xBB, 0x20, 0x61, 0x7B,
+		0x3B, 0x9C, 0x2A, 0x69, 0x90, 0xEF, 0xD7, 0x9A,
+		0x94, 0xC2, 0xF5, 0x86, 0xBD, 0x00, 0xF6, 0xEA,
+		0x0B, 0x14, 0x24, 0xF2, 0x08, 0x67, 0x42, 0x3A,
+		0xB5, 0xB8, 0x32, 0x97, 0xB5, 0x99, 0x69, 0x75,
+		0x60, 0x00, 0x8F, 0xF7, 0x6F, 0x16, 0x52, 0x66,
+		0xF1, 0xA9, 0x38, 0xFD, 0xB0, 0x61, 0x60, 0xB5 };
+
 static uint8_t ccm_aad_test_1[8] = {
 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
 };
@@ -1736,6 +1746,58 @@ static const struct aead_test_data gcm_test_case_256_7 = {
 	}
 };
 
+static const struct aead_test_data gcm_test_case_256_8 = {
+	.algo = RTE_CRYPTO_AEAD_AES_GCM,
+	.key = {
+		.data = {
+			0xD8, 0xFD, 0x8F, 0x5A, 0x13, 0x7B, 0x05, 0x2C,
+			0xA4, 0x64, 0x7A, 0xDD, 0x1E, 0x9A, 0x68, 0x33,
+			0x04, 0x70, 0xE8, 0x1E, 0x42, 0x84, 0x64, 0xD2,
+			0x23, 0xA1, 0x6A, 0x0A, 0x05, 0x7B, 0x90, 0xDE},
+		.len = 32
+	},
+	.iv = {
+		.data = {
+			0x8D, 0xDF, 0xB8, 0x7F, 0xD0, 0x79, 0x77, 0x55,
+			0xD5, 0x48, 0x03, 0x05},
+		.len = 12
+	},
+	.aad = {
+		.data = gcm_aad_64B_text,
+		.len = 64
+	},
+	.plaintext = {
+		.data = {
+			0x4D, 0xBC, 0x2C, 0x7F, 0x25, 0x1F, 0x07, 0x25,
+			0x54, 0x8C, 0x43, 0xDB, 0xD8, 0x06, 0x9F, 0xBF,
+			0xCA, 0x60, 0xF4, 0xEF, 0x13, 0x87, 0xE8, 0x2F,
+			0x4D, 0x9D, 0x1D, 0x87, 0x9F, 0x91, 0x79, 0x7E,
+			0x3E, 0x98, 0xA3, 0x63, 0xC6, 0xFE, 0xDB, 0x35,
+			0x96, 0x59, 0xB2, 0x0C, 0x80, 0x96, 0x70, 0x07,
+			0x87, 0x42, 0xAB, 0x4F, 0x31, 0x73, 0xC4, 0xF9,
+			0xB0, 0x1E, 0xF1, 0xBC, 0x7D, 0x45, 0xE5, 0xF3},
+		.len = 64
+	},
+	.ciphertext = {
+	    .data = {
+			0x21, 0xFA, 0x59, 0x4F, 0x1F, 0x6B, 0x19, 0xC2,
+			0x68, 0xBC, 0x05, 0x93, 0x4E, 0x48, 0x6C, 0x5B,
+			0x0B, 0x7A, 0x43, 0xB7, 0x60, 0x8E, 0x00, 0xC4,
+			0xAB, 0x14, 0x6B, 0xCC, 0xA1, 0x27, 0x6A, 0xDE,
+			0x8E, 0xB6, 0x98, 0xBB, 0x4F, 0xD0, 0x6F, 0x30,
+			0x0F, 0x04, 0xA8, 0x5B, 0xDC, 0xD8, 0xE8, 0x8A,
+			0x73, 0xD9, 0xB8, 0x60, 0x7C, 0xE4, 0x32, 0x4C,
+			0x3A, 0x0B, 0xC2, 0x82, 0xDA, 0x88, 0x17, 0x69},
+	    .len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x3B, 0x80, 0x83, 0x72, 0xE5, 0x1B, 0x94, 0x15,
+			0x75, 0xC8, 0x62, 0xBC, 0xA1, 0x66, 0x91, 0x45},
+		.len = 16
+	}
+};
+
 /** variable AAD AES-GCM-128 Test Vectors */
 static const struct aead_test_data gcm_test_case_aad_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
-- 
2.25.1
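
The test-side change here is that vectors are now gated on the full AEAD
capability ranges rather than on algorithm presence alone, so the new
64-byte-AAD vector is skipped on PMDs whose advertised aad_size range
cannot hold it. A sketch of the equivalent check with the sizes from
gcm_test_case_256_8 (dev_id is a placeholder for the device under test):

#include <rte_cryptodev.h>

static int
vector_supported(uint8_t dev_id)
{
	struct rte_cryptodev_sym_capability_idx cap_idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
	};
	const struct rte_cryptodev_symmetric_capability *cap;

	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
	if (cap == NULL)
		return 0;
	/* returns 0 when key/digest/AAD/IV sizes are all in range:
	 * 32B key, 16B tag, 64B AAD, 12B IV */
	return rte_cryptodev_sym_capability_check_aead(cap, 32, 16, 64, 12) == 0;
}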


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v3 0/4] add new QAT gen3 and gen5
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
                   ` (4 preceding siblings ...)
  2024-02-26 13:03 ` [PATCH v2 0/4] add QAT GEN LCE device Nishikant Nayak
@ 2024-02-27  9:35 ` Nishikant Nayak
  2024-02-27  9:35   ` [PATCH v3 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
                     ` (3 more replies)
  2024-02-27  9:40 ` [PATCH v4 0/4] add QAT GEN LCE device Nishikant Nayak
                   ` (4 subsequent siblings)
  10 siblings, 4 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:35 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patchset adds support for two new QAT devices.
A new GEN3 device, and a GEN5 device, both of which have
wireless slice support for algorithms such as ZUC-256.

Symmetric, asymmetric and compression are all supported
for these devices.

v3:
  - Fixed typos in commit and code comments.
  - Replaced use of linux/kernel.h macro with local macro
    to fix ARM compilation in CI.
v2:
  - New patch added for gen5 device that reuses gen4 code,
    and new gen3 wireless slice changes.
  - Removed patch to disable asymmetric and compression.
  - Documentation updates added.
  - Fixed ZUC-256 IV modification for raw API path.
  - Fixed setting extended protocol flag bit position.
  - Added check for ZUC-256 wireless slice in slice map.

Nishikant Nayak (4):
  common/qat: add files specific to GEN LCE
  common/qat: update common driver to support GEN LCE
  crypto/qat: update headers for GEN LCE support
  test/cryptodev: add tests for GCM with AAD

 .mailmap                                      |   1 +
 app/test/test_cryptodev.c                     |  48 ++-
 app/test/test_cryptodev_aead_test_vectors.h   |  62 ++++
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   9 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.c                  |  16 +-
 drivers/crypto/qat/qat_sym.h                  |  66 +++-
 drivers/crypto/qat/qat_sym_session.c          |  62 +++-
 drivers/crypto/qat/qat_sym_session.h          |  10 +-
 17 files changed, 1089 insertions(+), 16 deletions(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v3 1/4] common/qat: add files specific to GEN LCE
  2024-02-27  9:35 ` [PATCH v3 0/4] add new QAT gen3 and gen5 Nishikant Nayak
@ 2024-02-27  9:35   ` Nishikant Nayak
  2024-02-27  9:35   ` [PATCH v3 2/4] common/qat: update common driver to support " Nishikant Nayak
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:35 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Thomas Monjalon, Anatoly Burakov

Adding GEN LCE files for handling GEN LCE specific operations.
These files are inherited from the existing files/APIs,
with some changes specific to GEN LCE requirements.
Also updated the mailmap file.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v3:
    - Removed use of linux/kernel.h macro to fix ARM compilation.
    - Fixed typo in commit body and code comment.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
---
 .mailmap                                      |   1 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  14 +
 drivers/common/qat/qat_common.h               |   1 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.h                  |   6 +
 9 files changed, 758 insertions(+)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

diff --git a/.mailmap b/.mailmap
index 58cca13ef6..8008e5a899 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1036,6 +1036,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>
 Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/drivers/common/qat/dev/qat_dev_gen_lce.c b/drivers/common/qat/dev/qat_dev_gen_lce.c
new file mode 100644
index 0000000000..27219ff942
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen_lce.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen_lcevf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
+
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX	VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 4)
+#define LCE_DEVICE_NAME_SIZE			64
+#define LCE_DEVICE_MAX_BANKS			2080
+#define LCE_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define LCE_DEVICE_BITMAP_SIZE  \
+	LCE_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)
+
+/* QAT GEN_LCE specific macros */
+#define QAT_GEN_LCE_BUNDLE_NUM		LCE_DEVICE_MAX_BANKS
+#define QAT_GEN_LCE_QPS_PER_BUNDLE_NUM	1
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+	__u8 device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+struct lce_qat_domain {
+	uint32_t nid        :3;
+	uint32_t fid        :7;
+	uint32_t ftype      :2;
+	uint32_t vfid       :13;
+	uint32_t rid        :4;
+	uint32_t vld        :1;
+	uint32_t desc_over  :1;
+	uint32_t pasid_vld  :1;
+	uint32_t pasid      :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id:   20;
+	uint32_t type:      4;
+	uint32_t resv:      8;
+	struct lce_qat_domain dom;
+};
+
+struct qat_dev_gen_lce_extra {
+	struct qat_qp_hw_data
+	    qp_gen_lce_data[QAT_GEN_LCE_BUNDLE_NUM][QAT_GEN_LCE_QPS_PER_BUNDLE_NUM];
+};
+
+static struct qat_pf2vf_dev qat_pf2vf_gen_lce = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static int
+qat_select_valid_queue_gen_lce(struct qat_pci_device *qat_dev, int qp_id,
+			    enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN_LCE_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen_lce_data[i][0].service_type ==
+				service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen_lce(struct qat_pci_device *qat_dev,
+			enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen_lce(qat_dev, qp_id,
+								service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen_lce_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen_lce(struct qat_pci_device *qat_dev,
+			      enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN_LCE_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+static int qat_dev_read_config_gen_lce(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/* Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen_lce_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+static void qat_qp_build_ring_base_gen_lce(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN_LCE(queue->base_phys_addr,
+					       queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN_LCEVF(io_addr, queue->hw_bundle_number,
+				   queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen_lce(const struct qat_queue *txq,
+			   void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+	    (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+	     txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen_lce(const struct qat_queue *txq,
+			    void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+							txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen_lce(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+				     q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+				     q_rx->hw_queue_number, q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen_lce(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen_lce(struct qat_qp *qp, struct qat_queue *q,
+			   uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen_lce(struct qat_pci_device *qat_dev, void *io_addr,
+		      struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen_lce(qp);
+	qat_qp_adf_arb_enable_gen_lce(&qp->tx_q, qp->mmap_bar_addr,
+				   &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen_lce = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen_lce,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen_lce,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen_lce,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen_lce,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen_lce,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen_lce,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen_lce,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen_lce,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen_lce,
+};
+
+static int
+qat_reset_ring_pairs_gen_lce(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static const struct rte_mem_resource*
+qat_dev_get_transport_bar_gen_lce(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen_lce(struct rte_mem_resource **mem_resource,
+			  struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen_lce(void)
+{
+	return sizeof(struct qat_dev_gen_lce_extra);
+}
+
+static int
+qat_dev_get_slice_map_gen_lce(uint32_t *map __rte_unused,
+	const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen_lce = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen_lce,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen_lce,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen_lce,
+	.qat_dev_read_config = qat_dev_read_config_gen_lce,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen_lce,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen_lce,
+};
+
+RTE_INIT(qat_dev_gen_lce_init)
+{
+	qat_qp_hw_spec[QAT_GEN_LCE] = &qat_qp_hw_spec_gen_lce;
+	qat_dev_hw_spec[QAT_GEN_LCE] = &qat_dev_hw_spec_gen_lce;
+	qat_gen_config[QAT_GEN_LCE].dev_gen = QAT_GEN_LCE;
+	qat_gen_config[QAT_GEN_LCE].pf2vf_dev = &qat_pf2vf_gen_lce;
+}
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 62abcb6fe3..bc7c3e5b85 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -82,6 +82,7 @@ sources += files(
         'dev/qat_dev_gen2.c',
         'dev/qat_dev_gen3.c',
         'dev/qat_dev_gen4.c',
+        'dev/qat_dev_gen_lce.c',
 )
 includes += include_directories(
         'qat_adf',
@@ -108,6 +109,7 @@ if qat_crypto
             'dev/qat_crypto_pmd_gen2.c',
             'dev/qat_crypto_pmd_gen3.c',
             'dev/qat_crypto_pmd_gen4.c',
+            'dev/qat_crypto_pmd_gen_lce.c',
         ]
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
new file mode 100644
index 0000000000..c9df8f5dd2
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+
+#include "adf_transport_access_macros.h"
+
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+#define ADF_RING_BUNDLE_SIZE_GEN_LCE 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN_LCE 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN_LCE 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN_LCE 0x1080
+
+#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+#define WRITE_CSR_RING_BASE_GEN_LCE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);	\
+	ADF_CSR_WR(csr_base_addr,	\
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),		\
+		l_base);						\
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
new file mode 100644
index 0000000000..3c7232de12
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen_lce.h"
+
+#define ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF 0x0
+
+#define WRITE_CSR_RING_BASE_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),	\
+		l_base);	\
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN_LCEVF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 70f0effa62..215b291b74 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -410,4 +410,18 @@ struct icp_qat_fw_la_cipher_20_req_params {
 	uint8_t    spc_auth_res_sz;
 };
 
+struct icp_qat_fw_la_cipher_30_req_params {
+	uint32_t   spc_aad_sz;
+	uint8_t    cipher_length;
+	uint8_t    reserved[2];
+	uint8_t    spc_auth_res_sz;
+	union {
+		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint64_t cipher_IV_ptr;
+			uint64_t resrvd1;
+		} s;
+
+	} u;
+};
 #endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index 9411a79301..642e009f28 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -21,6 +21,7 @@ enum qat_device_gen {
 	QAT_GEN2,
 	QAT_GEN3,
 	QAT_GEN4,
+	QAT_GEN_LCE,
 	QAT_N_GENS
 };
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
new file mode 100644
index 0000000000..3f1668b3d3
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding entry %d failed, no free SGL entry", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+		nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+	void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx;
+	start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+				QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+				nr, list->buffers[nr].len,
+				list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+static int
+qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+			"GEN_LCE PMD only supports AES-256 AEAD mode",
+			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B of IV can be directly embedded in the descriptor.
+	 * GCM supports only a 12B IV for GEN LCE.
+	 */
+	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.",
+			iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+		rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset),
+		iv_len);
+
+	/* Always SGL */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags &
+		ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* Knowing digest is contiguous to cipher-text helps optimizing SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len)
+		== digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC-SGL: 3 entries:
+	 * a) AAD
+	 * b) cipher
+	 * c) digest (only for decrypt and buffer is_NOT_adjacent)
+	 *
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr,
+			aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
+				digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST-SGL: 2 entries:
+	 * a) cipher
+	 * b) digest (only for encrypt and buffer is_NOT_adjacent)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
+				digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+		sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+		rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+		rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+		digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* build request for aead */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		build_request = qat_sym_build_op_aead_gen_lce;
+		ctx->build_request[proc_type] = build_request;
+	}
+	return 0;
+}
+
+static int
+qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
+	const char *capa_memz_name,
+	const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
+	uint32_t i;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+			size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	struct rte_cryptodev_capabilities *addr =
+		(struct rte_cryptodev_capabilities *)
+		internals->capa_mz->addr;
+	const struct rte_cryptodev_capabilities *capabilities =
+		qat_sym_crypto_caps_gen_lce;
+	const uint32_t capa_num =
+		size / sizeof(struct rte_cryptodev_capabilities);
+	uint32_t curr_capa = 0;
+
+	for (i = 0; i < capa_num; i++) {
+		memcpy(addr + curr_capa, capabilities + i,
+			sizeof(struct rte_cryptodev_capabilities));
+		curr_capa++;
+	}
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+RTE_INIT(qat_sym_crypto_gen_lce_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities =
+			qat_sym_crypto_cap_get_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session =
+			qat_sym_crypto_set_session_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags =
+			qat_sym_crypto_feature_flags_get_gen1;
+}
+
+RTE_INIT(qat_asym_crypto_gen_lce_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f976009bf2..f2f197d050 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
 
+/* Digest length for GCM Algo is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for GCM algo is 12 bytes */
+#define GCM_IV_LENGTH_GEN_LCE 12
+
 struct qat_sym_session;
 
 struct qat_sym_sgl {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v3 2/4] common/qat: update common driver to support GEN LCE
  2024-02-27  9:35 ` [PATCH v3 0/4] add new QAT gen3 and gen5 Nishikant Nayak
  2024-02-27  9:35   ` [PATCH v3 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
@ 2024-02-27  9:35   ` Nishikant Nayak
  2024-02-27  9:35   ` [PATCH v3 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
  2024-02-27  9:35   ` [PATCH v3 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:35 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

Adding GEN LCE specific macros which are required to support
GEN LCE features.
This patch also adds other macros which are used by the GEN LCE
specific APIs.
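
As a side note for reviewers, below is a minimal compilable sketch
(not part of the patch) of how the new header-flags build macro
composes its bit fields. The DESC_LAYOUT values are taken from this
patch; the VALID flag position (bit 7) is assumed from the existing
icp_qat_fw.h:

#include <stdio.h>
#include <stdint.h>

/* Values introduced by this patch */
#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3
/* Assumed from the existing header: the valid flag lives in bit 7 */
#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1

#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))

int main(void)
{
	/* valid = 1 -> bit 7 (0x80); layout = 3 -> bits 6:5 (0x60) */
	uint8_t hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(1,
			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);

	printf("hdr_flags = 0x%02x\n", hdr_flags); /* prints 0xe0 */
	return 0;
}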

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Fixed code formatting
---
 .../qat/qat_adf/adf_transport_access_macros.h |  1 +
 drivers/common/qat/qat_adf/icp_qat_fw.h       | 34 ++++++++++++++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    | 45 ++++++++++++++++++-
 drivers/common/qat/qat_device.c               |  9 ++++
 4 files changed, 88 insertions(+), 1 deletion(-)

diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 12a7258c60..19bd812419 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -47,6 +47,7 @@
 #define ADF_RING_SIZE_512 0x03
 #define ADF_RING_SIZE_4K 0x06
 #define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_64K 0x0A
 #define ADF_RING_SIZE_4M 0x10
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index 3aa17ae041..b78158e01d 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -57,6 +57,12 @@ struct icp_qat_fw_comn_req_hdr_cd_pars {
 	} u;
 };
 
+struct lce_key_buff_desc {
+	uint64_t keybuff;
+	uint32_t keybuff_resrvd1;
+	uint32_t keybuff_resrvd2;
+};
+
 struct icp_qat_fw_comn_req_mid {
 	uint64_t opaque_data;
 	uint64_t src_data_addr;
@@ -123,6 +129,12 @@ struct icp_qat_fw_comn_resp {
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
 
+/* GEN_LCE specific Common Header fields */
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
+#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3
+#define ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR 0
+
 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
 	icp_qat_fw_comn_req_hdr_t.service_type
 
@@ -168,6 +180,12 @@ struct icp_qat_fw_comn_resp {
 	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
 	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
 
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
+	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
+	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
+	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))
+
 #define QAT_COMN_PTR_TYPE_BITPOS 0
 #define QAT_COMN_PTR_TYPE_MASK 0x1
 #define QAT_COMN_CD_FLD_TYPE_BITPOS 1
@@ -180,10 +198,20 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_EXT_FLAGS_MASK 0x1
 #define QAT_COMN_EXT_FLAGS_USED 0x1
 
+/* GEN_LCE specific Common Request Flags fields */
+#define QAT_COMN_KEYBUF_USAGE_BITPOS 1
+#define QAT_COMN_KEYBUF_USAGE_MASK 0x1
+#define QAT_COMN_KEY_BUFFER_USED 1
+
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
 	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
 
+#define ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(ptr, keybuf) \
+	((((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \
+	 (((keybuf) & QAT_COMN_KEYBUF_USAGE_MASK) << \
+	   QAT_COMN_KEYBUF_USAGE_BITPOS))
+
 #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
 	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
 
@@ -249,6 +277,8 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
+#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1
 #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
 #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
 
@@ -280,6 +310,10 @@ struct icp_qat_fw_comn_resp {
 	QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
 	QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
 
+#define ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_INVALID_PARAM_BITPOS, \
+	QAT_COMN_RESP_INVALID_PARAM_MASK)
+
 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 215b291b74..eba9f96685 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -22,14 +22,24 @@ enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };
 
+/* In GEN_LCE Command ID 4 corresponds to AEAD */
+#define ICP_QAT_FW_LA_CMD_AEAD 4
+
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 
+/* GEN_LCE Hash, HMAC and GCM Verification Status */
+#define ICP_QAT_FW_LA_VER_STATUS_FAIL ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR
+
 struct icp_qat_fw_la_bulk_req {
 	struct icp_qat_fw_comn_req_hdr comn_hdr;
-	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	union {
+		struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+		struct lce_key_buff_desc key_buff;
+	};
 	struct icp_qat_fw_comn_req_mid comn_mid;
 	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
 	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
@@ -81,6 +91,21 @@ struct icp_qat_fw_la_bulk_req {
 #define ICP_QAT_FW_LA_PARTIAL_END 2
 #define QAT_LA_PARTIAL_BITPOS 0
 #define QAT_LA_PARTIAL_MASK 0x3
+
+/* GEN_LCE specific Crypto Flags fields */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
+#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
+#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
+#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
+#define ICP_QAT_FW_SYM_IV_IN_DESC_VALID 1
+#define ICP_QAT_FW_SYM_DIRECTION_BITPOS 15
+#define ICP_QAT_FW_SYM_DIRECTION_MASK 0x1
+
+/* In GEN_LCE, the AEAD AES-GCM algorithm has ID 0 */
+#define QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE 0
+
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -188,6 +213,23 @@ struct icp_qat_fw_la_bulk_req {
 	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
 	QAT_LA_PARTIAL_MASK)
 
+/* GEN_LCE specific Crypto Flags operations */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS, \
+	ICP_QAT_FW_SYM_AEAD_ALGO_MASK)
+
+#define ICP_QAT_FW_SYM_IV_SIZE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_SIZE_BITPOS, \
+	ICP_QAT_FW_SYM_IV_SIZE_MASK)
+
+#define ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS, \
+	ICP_QAT_FW_SYM_IV_IN_DESC_MASK)
+
+#define ICP_QAT_FW_SYM_DIR_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_DIRECTION_BITPOS, \
+	ICP_QAT_FW_SYM_DIRECTION_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
@@ -424,4 +466,5 @@ struct icp_qat_fw_la_cipher_30_req_params {
 
 	} u;
 };
+
 #endif
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index f55dc3c6f0..18e652e393 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -62,6 +62,12 @@ static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x4945),
 		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1454),
+		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1456),
+		},
 		{.device_id = 0},
 };
 
@@ -199,6 +205,9 @@ pick_gen(const struct rte_pci_device *pci_dev)
 	case 0x4943:
 	case 0x4945:
 		return QAT_GEN4;
+	case 0x1454:
+	case 0x1456:
+		return QAT_GEN_LCE;
 	default:
 		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
 		return QAT_N_GENS;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v3 3/4] crypto/qat: update headers for GEN LCE support
  2024-02-27  9:35 ` [PATCH v3 0/4] add new QAT gen3 and gen5 Nishikant Nayak
  2024-02-27  9:35   ` [PATCH v3 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
  2024-02-27  9:35   ` [PATCH v3 2/4] common/qat: update common driver to support " Nishikant Nayak
@ 2024-02-27  9:35   ` Nishikant Nayak
  2024-02-27  9:35   ` [PATCH v3 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:35 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patch handles the changes required for updating the common
header fields specific to GEN LCE. It also adds/updates the
response processing APIs based on GEN LCE requirements.
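
For reviewers, here is a minimal stand-alone sketch (not part of the
patch) of how the new response status bits are decoded. QAT_FIELD_GET
is written out locally with the same semantics as the driver macro,
and the bit positions are the ones this patch defines:

#include <stdio.h>
#include <stdint.h>

#define QAT_FIELD_GET(flags, bitpos, mask) (((flags) >> (bitpos)) & (mask))
/* Bit positions used by the GEN LCE response path in this series */
#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1

/* Mirrors the checks in qat_sym_process_response_gen_lce() */
static const char *decode_comn_status(uint8_t comn_status)
{
	if (QAT_FIELD_GET(comn_status,
			QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS,
			QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK))
		return "NOT_PROCESSED (unsupported request)";
	if (QAT_FIELD_GET(comn_status,
			QAT_COMN_RESP_INVALID_PARAM_BITPOS,
			QAT_COMN_RESP_INVALID_PARAM_MASK))
		return "INVALID_ARGS (invalid parameter)";
	return "SUCCESS";
}

int main(void)
{
	printf("%s\n", decode_comn_status(0x0)); /* SUCCESS */
	printf("%s\n", decode_comn_status(0x4)); /* bit 2 set */
	printf("%s\n", decode_comn_status(0x2)); /* bit 1 set */
	return 0;
}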

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Added GEN LCE specific API for deque burst.
    - Fixed code formatting.
---
 drivers/crypto/qat/qat_sym.c         | 16 ++++++-
 drivers/crypto/qat/qat_sym.h         | 60 ++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 62 +++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.h | 10 ++++-
 4 files changed, 140 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..439a3fc00b 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -180,7 +180,15 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
+			qat_sym_process_response, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+			qat_sym_process_response_gen_lce, nb_ops);
 }
 
 int
@@ -200,6 +208,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct rte_cryptodev *cryptodev;
 	struct qat_cryptodev_private *internals;
+	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
 	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
 		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 
@@ -249,7 +258,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
 	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+	if (qat_dev_gen == QAT_GEN_LCE)
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
+	else
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
 	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f2f197d050..3461113c13 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -90,7 +90,7 @@
 /*
  * Maximum number of SGL entries
  */
-#define QAT_SYM_SGL_MAX_NUMBER	16
+#define QAT_SYM_SGL_MAX_NUMBER 16
 
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
@@ -142,6 +142,10 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 #ifdef RTE_QAT_OPENSSL
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -390,6 +394,52 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen_lce(void **op, uint8_t *resp,
+	void *op_cookie __rte_unused,
+	uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+		(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+		(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+		sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+				resp_msg->comn_hdr.comn_status))
+			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+
+	*op = (void *)rx_op;
+
+	/*
+	 * Return 1, as the dequeue loop only moves on to the next op
+	 * if one was ready to return to the API.
+	 */
+	return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -455,7 +505,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-	void *op_cookie __rte_unused)
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9f4f6c3d93..8f50b61365 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		session->qat_cmd);
 		return -ENOTSUP;
 	}
+
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		qat_sym_session_init_gen_lce_hdr(session);
+		return 0;
+	}
+
 	qat_sym_session_finalize(session);
 
 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1016,6 +1025,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 			dev->data->dev_private;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
+
+		key_buff->keybuff = session->key_paddr;
+	}
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -1079,9 +1094,15 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	}
 
 	if (session->is_single_pass) {
-		if (qat_sym_cd_cipher_set(session,
+		if (qat_dev_gen != QAT_GEN_LCE) {
+			if (qat_sym_cd_cipher_set(session,
 				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
+				return -EINVAL;
+		} else {
+			session->auth_key_length = aead_xform->key.length;
+			memcpy(session->key_array, aead_xform->key.data,
+							aead_xform->key.length);
+		}
 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1970,6 +1991,43 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
+{
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+	/*
+	 * GEN_LCE provides a dedicated AEAD command ID, but the cryptodev
+	 * API processes AEAD as a single-pass crypto operation, so the
+	 * session algo command ID stays CIPHER. The request header below,
+	 * however, carries the GEN_LCE AEAD command ID, and the session
+	 * algo mode is AEAD.
+	 */
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->hdr_flags =
+	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
+			QAT_COMN_KEY_BUFFER_USED);
+
+	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
+		QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
+	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
+		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+		ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_DECRYPT);
+	} else {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_ENCRYPT);
+	}
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9209e2e8df..958af03405 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,10 +111,16 @@ struct qat_sym_session {
 	enum icp_qat_hw_auth_op auth_op;
 	enum icp_qat_hw_auth_mode auth_mode;
 	void *bpi_ctx;
-	struct qat_sym_cd cd;
+	union {
+		struct qat_sym_cd cd;
+		uint8_t key_array[32];
+	};
 	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
 	uint8_t *cd_cur_ptr;
-	phys_addr_t cd_paddr;
+	union {
+		phys_addr_t cd_paddr;
+		phys_addr_t key_paddr;
+	};
 	phys_addr_t prefix_paddr;
 	struct icp_qat_fw_la_bulk_req fw_req;
 	uint8_t aad_len;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v3 4/4] test/cryptodev: add tests for GCM with AAD
  2024-02-27  9:35 ` [PATCH v3 0/4] add new QAT gen3 and gen5 Nishikant Nayak
                     ` (2 preceding siblings ...)
  2024-02-27  9:35   ` [PATCH v3 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
@ 2024-02-27  9:35   ` Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:35 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Akhil Goyal, Fan Zhang

Adding one new unit test for validating the features
added as part of GCM with 64 byte AAD.
The new test case exercises the GCM algorithm for both
encrypt and decrypt operations.
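
For reference, once the series is applied these cases run as part of
the existing cryptodev autotest: start the dpdk-test binary with a QAT
GEN LCE device bound (an assumed setup, not covered by this patch) and
issue cryptodev_qat_autotest at the RTE> prompt; the two new cases
appear in the AES GCM 256-bit key encryption and decryption suites.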

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Removed unused code.
    - Added one new unit test, AAD with GCM for GEN LCE.
---
 app/test/test_cryptodev.c                   | 48 +++++++++++++---
 app/test/test_cryptodev_aead_test_vectors.h | 62 +++++++++++++++++++++
 2 files changed, 103 insertions(+), 7 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 38a65aa88f..edd23731f7 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -12494,6 +12494,18 @@ test_AES_GCM_auth_decryption_test_case_256_7(void)
 	return test_authenticated_decryption(&gcm_test_case_256_7);
 }
 
+static int
+test_AES_GCM_auth_decryption_test_case_256_8(void)
+{
+	return test_authenticated_decryption(&gcm_test_case_256_8);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_8(void)
+{
+	return test_authenticated_encryption(&gcm_test_case_256_8);
+}
+
 static int
 test_AES_GCM_auth_decryption_test_case_aad_1(void)
 {
@@ -12613,10 +12625,16 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
@@ -12719,16 +12737,22 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
-		return TEST_SKIPPED;
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
 
 	/* not supported with CPU crypto and raw data-path APIs*/
 	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO ||
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
+		return TEST_SKIPPED;
 
 	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
 			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
@@ -15749,10 +15773,16 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/*
@@ -17392,6 +17422,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_encryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_encryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encryption_test_case_256_8),
 
 		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
@@ -17408,6 +17440,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_decryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_decryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_decryption_test_case_256_8),
 
 		/** AES GCM Authenticated Encryption big aad size */
 		TEST_CASE_ST(ut_setup, ut_teardown,
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 07292620a4..eadf206e4d 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -17,6 +17,16 @@ static uint8_t gcm_aad_text[MAX_AAD_LENGTH] = {
 		0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
 		0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
 
+static uint8_t gcm_aad_64B_text[MAX_AAD_LENGTH] = {
+		0xED, 0x3E, 0xA8, 0x1F, 0x74, 0xE5, 0xD1, 0x96,
+		0xA4, 0xD5, 0x4B, 0x26, 0xBB, 0x20, 0x61, 0x7B,
+		0x3B, 0x9C, 0x2A, 0x69, 0x90, 0xEF, 0xD7, 0x9A,
+		0x94, 0xC2, 0xF5, 0x86, 0xBD, 0x00, 0xF6, 0xEA,
+		0x0B, 0x14, 0x24, 0xF2, 0x08, 0x67, 0x42, 0x3A,
+		0xB5, 0xB8, 0x32, 0x97, 0xB5, 0x99, 0x69, 0x75,
+		0x60, 0x00, 0x8F, 0xF7, 0x6F, 0x16, 0x52, 0x66,
+		0xF1, 0xA9, 0x38, 0xFD, 0xB0, 0x61, 0x60, 0xB5 };
+
 static uint8_t ccm_aad_test_1[8] = {
 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
 };
@@ -1736,6 +1746,58 @@ static const struct aead_test_data gcm_test_case_256_7 = {
 	}
 };
 
+static const struct aead_test_data gcm_test_case_256_8 = {
+	.algo = RTE_CRYPTO_AEAD_AES_GCM,
+	.key = {
+		.data = {
+			0xD8, 0xFD, 0x8F, 0x5A, 0x13, 0x7B, 0x05, 0x2C,
+			0xA4, 0x64, 0x7A, 0xDD, 0x1E, 0x9A, 0x68, 0x33,
+			0x04, 0x70, 0xE8, 0x1E, 0x42, 0x84, 0x64, 0xD2,
+			0x23, 0xA1, 0x6A, 0x0A, 0x05, 0x7B, 0x90, 0xDE},
+		.len = 32
+	},
+	.iv = {
+		.data = {
+			0x8D, 0xDF, 0xB8, 0x7F, 0xD0, 0x79, 0x77, 0x55,
+			0xD5, 0x48, 0x03, 0x05},
+		.len = 12
+	},
+	.aad = {
+		.data = gcm_aad_64B_text,
+		.len = 64
+	},
+	.plaintext = {
+		.data = {
+			0x4D, 0xBC, 0x2C, 0x7F, 0x25, 0x1F, 0x07, 0x25,
+			0x54, 0x8C, 0x43, 0xDB, 0xD8, 0x06, 0x9F, 0xBF,
+			0xCA, 0x60, 0xF4, 0xEF, 0x13, 0x87, 0xE8, 0x2F,
+			0x4D, 0x9D, 0x1D, 0x87, 0x9F, 0x91, 0x79, 0x7E,
+			0x3E, 0x98, 0xA3, 0x63, 0xC6, 0xFE, 0xDB, 0x35,
+			0x96, 0x59, 0xB2, 0x0C, 0x80, 0x96, 0x70, 0x07,
+			0x87, 0x42, 0xAB, 0x4F, 0x31, 0x73, 0xC4, 0xF9,
+			0xB0, 0x1E, 0xF1, 0xBC, 0x7D, 0x45, 0xE5, 0xF3},
+		.len = 64
+	},
+	.ciphertext = {
+		.data = {
+			0x21, 0xFA, 0x59, 0x4F, 0x1F, 0x6B, 0x19, 0xC2,
+			0x68, 0xBC, 0x05, 0x93, 0x4E, 0x48, 0x6C, 0x5B,
+			0x0B, 0x7A, 0x43, 0xB7, 0x60, 0x8E, 0x00, 0xC4,
+			0xAB, 0x14, 0x6B, 0xCC, 0xA1, 0x27, 0x6A, 0xDE,
+			0x8E, 0xB6, 0x98, 0xBB, 0x4F, 0xD0, 0x6F, 0x30,
+			0x0F, 0x04, 0xA8, 0x5B, 0xDC, 0xD8, 0xE8, 0x8A,
+			0x73, 0xD9, 0xB8, 0x60, 0x7C, 0xE4, 0x32, 0x4C,
+			0x3A, 0x0B, 0xC2, 0x82, 0xDA, 0x88, 0x17, 0x69},
+		.len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x3B, 0x80, 0x83, 0x72, 0xE5, 0x1B, 0x94, 0x15,
+			0x75, 0xC8, 0x62, 0xBC, 0xA1, 0x66, 0x91, 0x45},
+		.len = 16
+	}
+};
+
 /** variable AAD AES-GCM-128 Test Vectors */
 static const struct aead_test_data gcm_test_case_aad_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v4 0/4] add QAT GEN LCE device
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
                   ` (5 preceding siblings ...)
  2024-02-27  9:35 ` [PATCH v3 0/4] add new QAT gen3 and gen5 Nishikant Nayak
@ 2024-02-27  9:40 ` Nishikant Nayak
  2024-02-27  9:40   ` [PATCH v4 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
                     ` (4 more replies)
  2024-02-27 11:33 ` [PATCH v5 " Nishikant Nayak
                   ` (3 subsequent siblings)
  10 siblings, 5 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:40 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patchset adds a new QAT LCE device.
The device currently only supports symmetric crypto,
and only the AES-GCM algorithm.
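
A minimal sketch of the only configuration the device accepts through
the cryptodev API, per the capability added in patch 1/4 (the 'key'
buffer and IV_OFFSET below are illustrative placeholders, not part of
this series):

	struct rte_crypto_sym_xform aead_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = 32 },	/* AES-256 only */
			.iv = { .offset = IV_OFFSET, .length = 12 },	/* 12B IV */
			.digest_length = 16,
			.aad_length = 64,	/* 0..240B advertised */
		},
	};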

v4:
  - Fixed cover letter, v3 included the wrong details relating
    to another patchset.
v3:
  - Fixed typos in commit and code comments.
  - Replaced use of linux/kernel.h macro with local macro
    to fix ARM compilation in CI.
v2:
   - Renamed device from GEN 5 to GEN LCE.
   - Removed unused code.
   - Updated macro names.

Nishikant Nayak (4):
  common/qat: add files specific to GEN LCE
  common/qat: update common driver to support GEN LCE
  crypto/qat: update headers for GEN LCE support
  test/cryptodev: add tests for GCM with AAD

 .mailmap                                      |   1 +
 app/test/test_cryptodev.c                     |  48 ++-
 app/test/test_cryptodev_aead_test_vectors.h   |  62 ++++
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   9 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.c                  |  16 +-
 drivers/crypto/qat/qat_sym.h                  |  66 +++-
 drivers/crypto/qat/qat_sym_session.c          |  62 +++-
 drivers/crypto/qat/qat_sym_session.h          |  10 +-
 17 files changed, 1089 insertions(+), 16 deletions(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v4 1/4] common/qat: add files specific to GEN LCE
  2024-02-27  9:40 ` [PATCH v4 0/4] add QAT GEN LCE device Nishikant Nayak
@ 2024-02-27  9:40   ` Nishikant Nayak
  2024-02-27  9:40   ` [PATCH v4 2/4] common/qat: update common driver to support " Nishikant Nayak
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:40 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Thomas Monjalon, Anatoly Burakov

Adding GEN LCE files for handling GEN LCE specific operations.
These files are derived from the existing files/APIs,
with some changes specific to GEN LCE requirements.
Also updated the mailmap file.
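
Since only ring pair 0 is enabled for symmetric crypto here, the
generic queue-pair lookup resolves as in the following worked sketch
(illustration only, mirroring the code in this patch):

	const struct qat_qp_hw_data *hw =
		qat_qp_get_hw_data_gen_lce(qat_dev, QAT_SERVICE_SYMMETRIC, 0);
	/* hw == &dev_extra->qp_gen_lce_data[0][0], the only bank set up by
	 * qat_dev_read_config_gen_lce(): TX ring 0 carries 128B requests,
	 * RX ring 1 returns 32B responses.
	 */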

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v3:
    - Removed use of linux/kernel.h macro to fix ARM compilation.
    - Fixed typo in commit body and code comment.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
---
---
 .mailmap                                      |   1 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  14 +
 drivers/common/qat/qat_common.h               |   1 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.h                  |   6 +
 9 files changed, 758 insertions(+)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

diff --git a/.mailmap b/.mailmap
index 58cca13ef6..8008e5a899 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1036,6 +1036,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>
 Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/drivers/common/qat/dev/qat_dev_gen_lce.c b/drivers/common/qat/dev/qat_dev_gen_lce.c
new file mode 100644
index 0000000000..27219ff942
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen_lce.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen_lcevf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
+
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX	VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 4)
+#define LCE_DEVICE_NAME_SIZE			64
+#define LCE_DEVICE_MAX_BANKS			2080
+#define LCE_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define LCE_DEVICE_BITMAP_SIZE  \
+	LCE_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)
+
+/* QAT GEN_LCE specific macros */
+#define QAT_GEN_LCE_BUNDLE_NUM		LCE_DEVICE_MAX_BANKS
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM	1
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+	__u8 device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+struct lce_qat_domain {
+	uint32_t nid        :3;
+	uint32_t fid        :7;
+	uint32_t ftype      :2;
+	uint32_t vfid       :13;
+	uint32_t rid        :4;
+	uint32_t vld        :1;
+	uint32_t desc_over  :1;
+	uint32_t pasid_vld  :1;
+	uint32_t pasid      :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id:   20;
+	uint32_t type:      4;
+	uint32_t resv:      8;
+	struct lce_qat_domain dom;
+};
+
+struct qat_dev_gen_lce_extra {
+	struct qat_qp_hw_data
+	    qp_gen_lce_data[QAT_GEN_LCE_BUNDLE_NUM][QAT_GEN4_QPS_PER_BUNDLE_NUM];
+};
+
+static struct qat_pf2vf_dev qat_pf2vf_gen_lce = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static int
+qat_select_valid_queue_gen_lce(struct qat_pci_device *qat_dev, int qp_id,
+			    enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN_LCE_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen_lce_data[i][0].service_type ==
+				service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen_lce(struct qat_pci_device *qat_dev,
+			enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen_lce(qat_dev, qp_id,
+								service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen_lce_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen_lce(struct qat_pci_device *qat_dev,
+			      enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN_LCE_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+static int qat_dev_read_config_gen_lce(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/** Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen_lce_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+static void qat_qp_build_ring_base_gen_lce(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN_LCE(queue->base_phys_addr,
+					       queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN_LCEVF(io_addr, queue->hw_bundle_number,
+				   queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen_lce(const struct qat_queue *txq,
+			   void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+	    (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+	     txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen_lce(const struct qat_queue *txq,
+			    void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+							txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen_lce(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+				     q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+				     q_rx->hw_queue_number, q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen_lce(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen_lce(struct qat_qp *qp, struct qat_queue *q,
+			   uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen_lce(struct qat_pci_device *qat_dev, void *io_addr,
+		      struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen_lce(qp);
+	qat_qp_adf_arb_enable_gen_lce(&qp->tx_q, qp->mmap_bar_addr,
+				   &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen_lce = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen_lce,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen_lce,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen_lce,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen_lce,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen_lce,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen_lce,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen_lce,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen_lce,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen_lce,
+};
+
+static int
+qat_reset_ring_pairs_gen_lce(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static const struct rte_mem_resource*
+qat_dev_get_transport_bar_gen_lce(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen_lce(struct rte_mem_resource **mem_resource,
+			  struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen_lce(void)
+{
+	return sizeof(struct qat_dev_gen_lce_extra);
+}
+
+static int
+qat_dev_get_slice_map_gen_lce(uint32_t *map __rte_unused,
+	const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen_lce = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen_lce,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen_lce,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen_lce,
+	.qat_dev_read_config = qat_dev_read_config_gen_lce,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen_lce,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen_lce,
+};
+
+RTE_INIT(qat_dev_gen_lce_init)
+{
+	qat_qp_hw_spec[QAT_GEN_LCE] = &qat_qp_hw_spec_gen_lce;
+	qat_dev_hw_spec[QAT_GEN_LCE] = &qat_dev_hw_spec_gen_lce;
+	qat_gen_config[QAT_GEN_LCE].dev_gen = QAT_GEN_LCE;
+	qat_gen_config[QAT_GEN_LCE].pf2vf_dev = &qat_pf2vf_gen_lce;
+}
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 62abcb6fe3..bc7c3e5b85 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -82,6 +82,7 @@ sources += files(
         'dev/qat_dev_gen2.c',
         'dev/qat_dev_gen3.c',
         'dev/qat_dev_gen4.c',
+        'dev/qat_dev_gen_lce.c',
 )
 includes += include_directories(
         'qat_adf',
@@ -108,6 +109,7 @@ if qat_crypto
             'dev/qat_crypto_pmd_gen2.c',
             'dev/qat_crypto_pmd_gen3.c',
             'dev/qat_crypto_pmd_gen4.c',
+            'dev/qat_crypto_pmd_gen_lce.c',
         ]
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
new file mode 100644
index 0000000000..c9df8f5dd2
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+
+#include "adf_transport_access_macros.h"
+
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+#define ADF_RING_BUNDLE_SIZE_GEN_LCE 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN_LCE 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN_LCE 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN_LCE 0x1080
+
+#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+#define WRITE_CSR_RING_BASE_GEN_LCE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);	\
+	ADF_CSR_WR(csr_base_addr,	\
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),		\
+		l_base);						\
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
new file mode 100644
index 0000000000..3c7232de12
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen_lce.h"
+
+#define ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF 0x0
+
+#define WRITE_CSR_RING_BASE_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),	\
+		l_base);	\
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN_LCEVF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 70f0effa62..215b291b74 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -410,4 +410,18 @@ struct icp_qat_fw_la_cipher_20_req_params {
 	uint8_t    spc_auth_res_sz;
 };
 
+struct icp_qat_fw_la_cipher_30_req_params {
+		uint32_t   spc_aad_sz;
+		uint8_t    cipher_length;
+		uint8_t    reserved[2];
+		uint8_t    spc_auth_res_sz;
+		union {
+				uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+				struct {
+						uint64_t cipher_IV_ptr;
+						uint64_t resrvd1;
+			} s;
+
+		} u;
+};
 #endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index 9411a79301..642e009f28 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -21,6 +21,7 @@ enum qat_device_gen {
 	QAT_GEN2,
 	QAT_GEN3,
 	QAT_GEN4,
+	QAT_GEN_LCE,
 	QAT_N_GENS
 };
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
new file mode 100644
index 0000000000..3f1668b3d3
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding %d entry failed, no empty SGL buffer", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+		nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+	void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx;
+	start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+				QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+				nr, list->buffers[nr].len,
+				list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+static int
+qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+			"GEN_LCE PMD only supports AES-256 AEAD mode",
+			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B of IV can be embedded directly in the descriptor.
+	 * GCM on GEN LCE supports only a 12B IV.
+	 */
+	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.",
+			iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+		rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset),
+		iv_len);
+
+	/* Always SGL */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags &
+		ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* Knowing digest is contiguous to cipher-text helps optimizing SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len)
+		== digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC-SGL: 3 entries:
+	 * a) AAD
+	 * b) cipher
+	 * c) digest (only for decrypt and buffer is_NOT_adjacent)
+	 *
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr,
+			aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
+				digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST-SGL: 2 entries:
+	 * a) cipher
+	 * b) digest (only for encrypt and buffer is_NOT_adjacent)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
+				digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+		sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+		rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+		rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+		digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* build request for aead */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		build_request = qat_sym_build_op_aead_gen_lce;
+		ctx->build_request[proc_type] = build_request;
+	}
+	return 0;
+}
+
+
+static int
+qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
+	const char *capa_memz_name,
+	const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
+	uint32_t i;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+			size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	struct rte_cryptodev_capabilities *addr =
+		(struct rte_cryptodev_capabilities *)
+		internals->capa_mz->addr;
+	const struct rte_cryptodev_capabilities *capabilities =
+		qat_sym_crypto_caps_gen_lce;
+	const uint32_t capa_num =
+		size / sizeof(struct rte_cryptodev_capabilities);
+	uint32_t curr_capa = 0;
+
+	for (i = 0; i < capa_num; i++) {
+		memcpy(addr + curr_capa, capabilities + i,
+			sizeof(struct rte_cryptodev_capabilities));
+		curr_capa++;
+	}
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+RTE_INIT(qat_sym_crypto_gen_lce_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities =
+			qat_sym_crypto_cap_get_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session =
+			qat_sym_crypto_set_session_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags =
+			qat_sym_crypto_feature_flags_get_gen1;
+}
+
+RTE_INIT(qat_asym_crypto_gen_lce_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f976009bf2..f2f197d050 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
 
+/* Digest length for GCM Algo is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for GCM algo is 12 bytes */
+#define GCM_IV_LENGTH_GEN_LCE 12
+
 struct qat_sym_session;
 
 struct qat_sym_sgl {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v4 2/4] common/qat: update common driver to support GEN LCE
  2024-02-27  9:40 ` [PATCH v4 0/4] add QAT GEN LCE device Nishikant Nayak
  2024-02-27  9:40   ` [PATCH v4 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
@ 2024-02-27  9:40   ` Nishikant Nayak
  2024-02-27  9:40   ` [PATCH v4 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:40 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

Adding GEN LCE specific macros which are required to support
GEN LCE features.
This patch also adds other macros which are used by the GEN LCE
specific APIs.
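
A sketch of how the new flag builders compose, mirroring their use in
patch 3/4 ('header' is the request template header, shown only for
illustration):

	/* valid bit (bit 7) | GEN_LCE descriptor layout 3 (bits 6:5) */
	header->hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(
			ICP_QAT_FW_COMN_REQ_FLAG_SET,
			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
	/* SGL pointer type (bit 0) | key buffer used (bit 1) */
	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(
			QAT_COMN_PTR_TYPE_SGL, QAT_COMN_KEY_BUFFER_USED);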

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Fixed code formatting
---
---
 .../qat/qat_adf/adf_transport_access_macros.h |  1 +
 drivers/common/qat/qat_adf/icp_qat_fw.h       | 34 ++++++++++++++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    | 45 ++++++++++++++++++-
 drivers/common/qat/qat_device.c               |  9 ++++
 4 files changed, 88 insertions(+), 1 deletion(-)

diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 12a7258c60..19bd812419 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -47,6 +47,7 @@
 #define ADF_RING_SIZE_512 0x03
 #define ADF_RING_SIZE_4K 0x06
 #define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_64K 0x0A
 #define ADF_RING_SIZE_4M 0x10
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index 3aa17ae041..b78158e01d 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -57,6 +57,12 @@ struct icp_qat_fw_comn_req_hdr_cd_pars {
 	} u;
 };
 
+struct lce_key_buff_desc {
+	uint64_t keybuff;
+	uint32_t keybuff_resrvd1;
+	uint32_t keybuff_resrvd2;
+};
+
 struct icp_qat_fw_comn_req_mid {
 	uint64_t opaque_data;
 	uint64_t src_data_addr;
@@ -123,6 +129,12 @@ struct icp_qat_fw_comn_resp {
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
 
+/* GEN_LCE specific Common Header fields */
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
+#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3
+#define ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR 0
+
 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
 	icp_qat_fw_comn_req_hdr_t.service_type
 
@@ -168,6 +180,12 @@ struct icp_qat_fw_comn_resp {
 	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
 	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
 
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
+	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
+	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
+	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))
+
 #define QAT_COMN_PTR_TYPE_BITPOS 0
 #define QAT_COMN_PTR_TYPE_MASK 0x1
 #define QAT_COMN_CD_FLD_TYPE_BITPOS 1
@@ -180,10 +198,20 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_EXT_FLAGS_MASK 0x1
 #define QAT_COMN_EXT_FLAGS_USED 0x1
 
+/* GEN_LCE specific Common Request Flags fields */
+#define QAT_COMN_KEYBUF_USAGE_BITPOS 1
+#define QAT_COMN_KEYBUF_USAGE_MASK 0x1
+#define QAT_COMN_KEY_BUFFER_USED 1
+
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
 	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
 
+#define ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(ptr, keybuf) \
+	((((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \
+	 (((keybuf) & QAT_COMN_KEYBUF_USAGE_MASK) << \
+	   QAT_COMN_KEYBUF_USAGE_BITPOS))
+
 #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
 	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
 
@@ -249,6 +277,8 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
+#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1
 #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
 #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
 
@@ -280,6 +310,10 @@ struct icp_qat_fw_comn_resp {
 	QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
 	QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
 
+#define ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_INVALID_PARAM_BITPOS, \
+	QAT_COMN_RESP_INVALID_PARAM_MASK)
+
 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 215b291b74..eba9f96685 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -22,14 +22,24 @@ enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };
 
+/* In GEN_LCE Command ID 4 corresponds to AEAD */
+#define ICP_QAT_FW_LA_CMD_AEAD 4
+
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 
+/* GEN_LCE Hash, HMAC and GCM Verification Status */
+#define ICP_QAT_FW_LA_VER_STATUS_FAIL ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR
+
+
 struct icp_qat_fw_la_bulk_req {
 	struct icp_qat_fw_comn_req_hdr comn_hdr;
-	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	union {
+		struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+		struct lce_key_buff_desc key_buff;
+	};
 	struct icp_qat_fw_comn_req_mid comn_mid;
 	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
 	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
@@ -81,6 +91,21 @@ struct icp_qat_fw_la_bulk_req {
 #define ICP_QAT_FW_LA_PARTIAL_END 2
 #define QAT_LA_PARTIAL_BITPOS 0
 #define QAT_LA_PARTIAL_MASK 0x3
+
+/* GEN_LCE specific Crypto Flags fields */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
+#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
+#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
+#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
+#define ICP_QAT_FW_SYM_IV_IN_DESC_VALID 1
+#define ICP_QAT_FW_SYM_DIRECTION_BITPOS 15
+#define ICP_QAT_FW_SYM_DIRECTION_MASK 0x1
+
+/* In GEN_LCE AEAD AES GCM Algorithm has ID 0 */
+#define QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE 0
+
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -188,6 +213,23 @@ struct icp_qat_fw_la_bulk_req {
 	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
 	QAT_LA_PARTIAL_MASK)
 
+/* GEN_LCE specific Crypto Flags operations */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS, \
+	ICP_QAT_FW_SYM_AEAD_ALGO_MASK)
+
+#define ICP_QAT_FW_SYM_IV_SIZE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_SIZE_BITPOS, \
+	ICP_QAT_FW_SYM_IV_SIZE_MASK)
+
+#define ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS, \
+	ICP_QAT_FW_SYM_IV_IN_DESC_MASK)
+
+#define ICP_QAT_FW_SYM_DIR_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_DIRECTION_BITPOS, \
+	ICP_QAT_FW_SYM_DIRECTION_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
@@ -424,4 +466,5 @@ struct icp_qat_fw_la_cipher_30_req_params {
 
 		} u;
 };
+
 #endif
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index f55dc3c6f0..18e652e393 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -62,6 +62,12 @@ static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x4945),
 		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1454),
+		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1456),
+		},
 		{.device_id = 0},
 };
 
@@ -199,6 +205,9 @@ pick_gen(const struct rte_pci_device *pci_dev)
 	case 0x4943:
 	case 0x4945:
 		return QAT_GEN4;
+	case 0x1454:
+	case 0x1456:
+		return QAT_GEN_LCE;
 	default:
 		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
 		return QAT_N_GENS;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v4 3/4] crypto/qat: update headers for GEN LCE support
  2024-02-27  9:40 ` [PATCH v4 0/4] add QAT GEN LCE device Nishikant Nayak
  2024-02-27  9:40   ` [PATCH v4 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
  2024-02-27  9:40   ` [PATCH v4 2/4] common/qat: update common driver to support " Nishikant Nayak
@ 2024-02-27  9:40   ` Nishikant Nayak
  2024-02-27  9:40   ` [PATCH v4 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
  2024-02-27  9:54   ` [PATCH v4 0/4] add QAT GEN LCE device Power, Ciara
  4 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:40 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patch handles the changes required for updating the common
header fields specific to GEN LCE. It also adds and updates the
response processing APIs based on GEN LCE requirements.
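
The new INVALID_PARAM status getter is a plain shift-and-mask over the
common status byte; a sketch of its expansion ('resp' is illustrative,
and QAT_FIELD_GET is the existing shift-and-mask helper in
icp_qat_fw.h):

	/* ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET() expands to: */
	uint8_t invalid_param = (resp->comn_hdr.comn_status >> 1) & 0x1;
	/* BITPOS 1, MASK 0x1; a non-zero result is mapped to
	 * RTE_CRYPTO_OP_STATUS_INVALID_ARGS in
	 * qat_sym_process_response_gen_lce().
	 */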

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Added GEN LCE specific API for deque burst.
    - Fixed code formatting.
---
---
 drivers/crypto/qat/qat_sym.c         | 16 ++++++-
 drivers/crypto/qat/qat_sym.h         | 60 ++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 62 +++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.h | 10 ++++-
 4 files changed, 140 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..439a3fc00b 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -180,7 +180,15 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
+			qat_sym_process_response, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+							uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+			qat_sym_process_response_gen_lce, nb_ops);
 }
 
 int
@@ -200,6 +208,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct rte_cryptodev *cryptodev;
 	struct qat_cryptodev_private *internals;
+	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
 	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
 		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 
@@ -249,7 +258,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
 	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+	if (qat_dev_gen == QAT_GEN_LCE)
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
+	else
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
 	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f2f197d050..3461113c13 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -90,7 +90,7 @@
 /*
  * Maximum number of SGL entries
  */
-#define QAT_SYM_SGL_MAX_NUMBER	16
+#define QAT_SYM_SGL_MAX_NUMBER 16
 
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
@@ -142,6 +142,10 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 #ifdef RTE_QAT_OPENSSL
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -390,6 +394,52 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen_lce(void **op, uint8_t *resp,
+	void *op_cookie __rte_unused,
+	uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+		(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+		(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+		sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+				resp_msg->comn_hdr.comn_status))
+			rx_op->status =	RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+
+	*op = (void *)rx_op;
+
+	/*
+	 * Return 1: the dequeue loop only moves on to the next op
+	 * when one was ready to be returned to the API.
+	 */
+	return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -455,7 +505,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-	void *op_cookie __rte_unused)
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9f4f6c3d93..8f50b61365 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		session->qat_cmd);
 		return -ENOTSUP;
 	}
+
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		qat_sym_session_init_gen_lce_hdr(session);
+		return 0;
+	}
+
 	qat_sym_session_finalize(session);
 
 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1016,6 +1025,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 			dev->data->dev_private;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
+
+		key_buff->keybuff = session->key_paddr;
+	}
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -1079,9 +1094,15 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	}
 
 	if (session->is_single_pass) {
-		if (qat_sym_cd_cipher_set(session,
+		if (qat_dev_gen != QAT_GEN_LCE) {
+			if (qat_sym_cd_cipher_set(session,
 				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
+				return -EINVAL;
+		} else {
+			session->auth_key_length = aead_xform->key.length;
+			memcpy(session->key_array, aead_xform->key.data,
+							aead_xform->key.length);
+		}
 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1970,6 +1991,43 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
+{
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+	/*
+	 * GEN_LCE specifies a separate command ID for AEAD operations, but
+	 * the cryptodev API processes AEAD as a single-pass cipher operation.
+	 * Hence, even for GEN_LCE, the session algo command ID stays CIPHER
+	 * while the session algo mode is AEAD (the FW header carries AEAD).
+	 */
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->hdr_flags =
+	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
+			QAT_COMN_KEY_BUFFER_USED);
+
+	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
+		QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
+	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
+		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+		ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_DECRYPT);
+	} else {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_ENCRYPT);
+	}
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9209e2e8df..958af03405 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,10 +111,16 @@ struct qat_sym_session {
 	enum icp_qat_hw_auth_op auth_op;
 	enum icp_qat_hw_auth_mode auth_mode;
 	void *bpi_ctx;
-	struct qat_sym_cd cd;
+	union {
+		struct qat_sym_cd cd;
+		uint8_t key_array[32];
+	};
 	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
 	uint8_t *cd_cur_ptr;
-	phys_addr_t cd_paddr;
+	union {
+		phys_addr_t cd_paddr;
+		phys_addr_t key_paddr;
+	};
 	phys_addr_t prefix_paddr;
 	struct icp_qat_fw_la_bulk_req fw_req;
 	uint8_t aad_len;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v4 4/4] test/cryptodev: add tests for GCM with AAD
  2024-02-27  9:40 ` [PATCH v4 0/4] add QAT GEN LCE device Nishikant Nayak
                     ` (2 preceding siblings ...)
  2024-02-27  9:40   ` [PATCH v4 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
@ 2024-02-27  9:40   ` Nishikant Nayak
  2024-02-27  9:54   ` [PATCH v4 0/4] add QAT GEN LCE device Power, Ciara
  4 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27  9:40 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Akhil Goyal, Fan Zhang

Adding a new unit test for validating the features added
as part of GCM with 64-byte AAD.
The new test case exercises the GCM algorithm for both
encrypt and decrypt operations.
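
The new cases are gated on the device actually advertising these
parameters, via the capability check this patch also adds; a minimal
sketch of that gate ('dev_id' is illustrative):

	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
	};
	const struct rte_cryptodev_symmetric_capability *cap =
		rte_cryptodev_sym_capability_get(dev_id, &idx);
	/* key 32B, digest 16B, AAD 64B, IV 12B; non-zero == unsupported */
	if (cap == NULL || rte_cryptodev_sym_capability_check_aead(cap,
			32, 16, 64, 12) != 0)
		return TEST_SKIPPED;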

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
    - Removed unused code.
    - Added one new unit test, AAD with GCM for GEN LCE.
---
---
 app/test/test_cryptodev.c                   | 48 +++++++++++++---
 app/test/test_cryptodev_aead_test_vectors.h | 62 +++++++++++++++++++++
 2 files changed, 103 insertions(+), 7 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 38a65aa88f..edd23731f7 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -12494,6 +12494,18 @@ test_AES_GCM_auth_decryption_test_case_256_7(void)
 	return test_authenticated_decryption(&gcm_test_case_256_7);
 }
 
+static int
+test_AES_GCM_auth_decryption_test_case_256_8(void)
+{
+	return test_authenticated_decryption(&gcm_test_case_256_8);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_8(void)
+{
+	return test_authenticated_encryption(&gcm_test_case_256_8);
+}
+
 static int
 test_AES_GCM_auth_decryption_test_case_aad_1(void)
 {
@@ -12613,10 +12625,16 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
@@ -12719,16 +12737,22 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
-		return TEST_SKIPPED;
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
 
 	/* not supported with CPU crypto and raw data-path APIs*/
 	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO ||
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
+		return TEST_SKIPPED;
 
 	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
 			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
@@ -15749,10 +15773,16 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/*
@@ -17392,6 +17422,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_encryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_encryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encryption_test_case_256_8),
 
 		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
@@ -17408,6 +17440,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_decryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_decryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_decryption_test_case_256_8),
 
 		/** AES GCM Authenticated Encryption big aad size */
 		TEST_CASE_ST(ut_setup, ut_teardown,
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 07292620a4..eadf206e4d 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -17,6 +17,16 @@ static uint8_t gcm_aad_text[MAX_AAD_LENGTH] = {
 		0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
 		0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
 
+static uint8_t gcm_aad_64B_text[MAX_AAD_LENGTH] = {
+		0xED, 0x3E, 0xA8, 0x1F, 0x74, 0xE5, 0xD1, 0x96,
+		0xA4, 0xD5, 0x4B, 0x26, 0xBB, 0x20, 0x61, 0x7B,
+		0x3B, 0x9C, 0x2A, 0x69, 0x90, 0xEF, 0xD7, 0x9A,
+		0x94, 0xC2, 0xF5, 0x86, 0xBD, 0x00, 0xF6, 0xEA,
+		0x0B, 0x14, 0x24, 0xF2, 0x08, 0x67, 0x42, 0x3A,
+		0xB5, 0xB8, 0x32, 0x97, 0xB5, 0x99, 0x69, 0x75,
+		0x60, 0x00, 0x8F, 0xF7, 0x6F, 0x16, 0x52, 0x66,
+		0xF1, 0xA9, 0x38, 0xFD, 0xB0, 0x61, 0x60, 0xB5 };
+
 static uint8_t ccm_aad_test_1[8] = {
 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
 };
@@ -1736,6 +1746,58 @@ static const struct aead_test_data gcm_test_case_256_7 = {
 	}
 };
 
+static const struct aead_test_data gcm_test_case_256_8 = {
+	.algo = RTE_CRYPTO_AEAD_AES_GCM,
+	.key = {
+		.data = {
+			0xD8, 0xFD, 0x8F, 0x5A, 0x13, 0x7B, 0x05, 0x2C,
+			0xA4, 0x64, 0x7A, 0xDD, 0x1E, 0x9A, 0x68, 0x33,
+			0x04, 0x70, 0xE8, 0x1E, 0x42, 0x84, 0x64, 0xD2,
+			0x23, 0xA1, 0x6A, 0x0A, 0x05, 0x7B, 0x90, 0xDE},
+		.len = 32
+	},
+	.iv = {
+		.data = {
+			0x8D, 0xDF, 0xB8, 0x7F, 0xD0, 0x79, 0x77, 0x55,
+			0xD5, 0x48, 0x03, 0x05},
+		.len = 12
+	},
+	.aad = {
+		.data = gcm_aad_64B_text,
+		.len = 64
+	},
+	.plaintext = {
+		.data = {
+			0x4D, 0xBC, 0x2C, 0x7F, 0x25, 0x1F, 0x07, 0x25,
+			0x54, 0x8C, 0x43, 0xDB, 0xD8, 0x06, 0x9F, 0xBF,
+			0xCA, 0x60, 0xF4, 0xEF, 0x13, 0x87, 0xE8, 0x2F,
+			0x4D, 0x9D, 0x1D, 0x87, 0x9F, 0x91, 0x79, 0x7E,
+			0x3E, 0x98, 0xA3, 0x63, 0xC6, 0xFE, 0xDB, 0x35,
+			0x96, 0x59, 0xB2, 0x0C, 0x80, 0x96, 0x70, 0x07,
+			0x87, 0x42, 0xAB, 0x4F, 0x31, 0x73, 0xC4, 0xF9,
+			0xB0, 0x1E, 0xF1, 0xBC, 0x7D, 0x45, 0xE5, 0xF3},
+		.len = 64
+	},
+	.ciphertext = {
+	    .data = {
+			0x21, 0xFA, 0x59, 0x4F, 0x1F, 0x6B, 0x19, 0xC2,
+			0x68, 0xBC, 0x05, 0x93, 0x4E, 0x48, 0x6C, 0x5B,
+			0x0B, 0x7A, 0x43, 0xB7, 0x60, 0x8E, 0x00, 0xC4,
+			0xAB, 0x14, 0x6B, 0xCC, 0xA1, 0x27, 0x6A, 0xDE,
+			0x8E, 0xB6, 0x98, 0xBB, 0x4F, 0xD0, 0x6F, 0x30,
+			0x0F, 0x04, 0xA8, 0x5B, 0xDC, 0xD8, 0xE8, 0x8A,
+			0x73, 0xD9, 0xB8, 0x60, 0x7C, 0xE4, 0x32, 0x4C,
+			0x3A, 0x0B, 0xC2, 0x82, 0xDA, 0x88, 0x17, 0x69},
+	    .len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x3B, 0x80, 0x83, 0x72, 0xE5, 0x1B, 0x94, 0x15,
+			0x75, 0xC8, 0x62, 0xBC, 0xA1, 0x66, 0x91, 0x45},
+		.len = 16
+	}
+};
+
 /** variable AAD AES-GCM-128 Test Vectors */
 static const struct aead_test_data gcm_test_case_aad_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* RE: [PATCH v4 0/4] add QAT GEN LCE device
  2024-02-27  9:40 ` [PATCH v4 0/4] add QAT GEN LCE device Nishikant Nayak
                     ` (3 preceding siblings ...)
  2024-02-27  9:40   ` [PATCH v4 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
@ 2024-02-27  9:54   ` Power, Ciara
  2024-02-29  9:47     ` Kusztal, ArkadiuszX
  4 siblings, 1 reply; 47+ messages in thread
From: Power, Ciara @ 2024-02-27  9:54 UTC (permalink / raw)
  To: Nayak, Nishikanta, dev; +Cc: Ji, Kai, Kusztal, ArkadiuszX, S Joshi, Rakesh



> -----Original Message-----
> From: Nayak, Nishikanta <nishikanta.nayak@intel.com>
> Sent: Tuesday, February 27, 2024 9:40 AM
> To: dev@dpdk.org
> Cc: Power, Ciara <ciara.power@intel.com>; Ji, Kai <kai.ji@intel.com>; Kusztal,
> ArkadiuszX <arkadiuszx.kusztal@intel.com>; S Joshi, Rakesh
> <rakesh.s.joshi@intel.com>; Nayak, Nishikanta <nishikanta.nayak@intel.com>
> Subject: [PATCH v4 0/4] add QAT GEN LCE device
> 
> This patchset adds a new QAT LCE device.
> The device currently only supports symmetric crypto, and only the AES-GCM
> algorithm.
> 
> v4:
>   - Fixed cover letter, v3 included the wrong details relating
>     to another patchset.
> v3:
>   - Fixed typos in commit and code comments.
>   - Replaced use of linux/kernel.h macro with local macro
>     to fix ARM compilation in CI.
> v2:
>    - Renamed device from GEN 5 to GEN LCE.
>    - Removed unused code.
>    - Updated macro names.
> 
> Nishikant Nayak (4):
>   common/qat: add files specific to GEN LCE
>   common/qat: update common driver to support GEN LCE
>   crypto/qat: update headers for GEN LCE support
>   test/cryptodev: add tests for GCM with AAD

Series-acked-by: Ciara Power <ciara.power@intel.com>

^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v5 0/4] add QAT GEN LCE device
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
                   ` (6 preceding siblings ...)
  2024-02-27  9:40 ` [PATCH v4 0/4] add QAT GEN LCE device Nishikant Nayak
@ 2024-02-27 11:33 ` Nishikant Nayak
  2024-02-27 11:33   ` [PATCH v5 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
                     ` (3 more replies)
  2024-02-28 14:00 ` [PATCH v6 0/4] add QAT GEN LCE device Nishikant Nayak
                   ` (2 subsequent siblings)
  10 siblings, 4 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27 11:33 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patchset adds a new QAT LCE device.
The device currently only supports symmetric crypto,
and only the AES-GCM algorithm.

v5:
  - Fixed compilation issue by replacing __u8 with uint8_t.
v4:
  - Fixed cover letter, v3 included the wrong details relating
    to another patchset.
v3:
  - Fixed typos in commit and code comments.
  - Replaced use of linux/kernel.h macro with local macro
    to fix ARM compilation in CI.
v2:
   - Renamed device from GEN 5 to GEN LCE.
   - Removed unused code.
   - Updated macro names.

Nishikant Nayak (4):
  common/qat: add files specific to GEN LCE
  common/qat: update common driver to support GEN LCE
  crypto/qat: update headers for GEN LCE support
  test/cryptodev: add tests for GCM with AAD

 .mailmap                                      |   1 +
 app/test/test_cryptodev.c                     |  48 ++-
 app/test/test_cryptodev_aead_test_vectors.h   |  62 ++++
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   9 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.c                  |  16 +-
 drivers/crypto/qat/qat_sym.h                  |  66 +++-
 drivers/crypto/qat/qat_sym_session.c          |  62 +++-
 drivers/crypto/qat/qat_sym_session.h          |  10 +-
 17 files changed, 1089 insertions(+), 16 deletions(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v5 1/4] common/qat: add files specific to GEN LCE
  2024-02-27 11:33 ` [PATCH v5 " Nishikant Nayak
@ 2024-02-27 11:33   ` Nishikant Nayak
  2024-02-27 11:33   ` [PATCH v5 2/4] common/qat: update common driver to support " Nishikant Nayak
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27 11:33 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Thomas Monjalon, Anatoly Burakov

Adding GEN LCE files for handling GEN LCE specific operations.
These files are inherited from the existing files/APIs,
which have some changes specific to GEN LCE requirements.
Also updated the mailmap file.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
---
v5:
    - Replaced usage of __u8 with uint8_t.
v3:
    - Removed use of linux/kernel.h macro to fix ARM compilation.
    - Fixed typo in commit body and code comment.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
---
---
 .mailmap                                      |   1 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  14 +
 drivers/common/qat/qat_common.h               |   1 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.h                  |   6 +
 9 files changed, 758 insertions(+)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

diff --git a/.mailmap b/.mailmap
index 58cca13ef6..8008e5a899 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1036,6 +1036,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>
 Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/drivers/common/qat/dev/qat_dev_gen_lce.c b/drivers/common/qat/dev/qat_dev_gen_lce.c
new file mode 100644
index 0000000000..5faaefc2d8
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen_lce.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen_lcevf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
+
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX	VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 4)
+#define LCE_DEVICE_NAME_SIZE			64
+#define LCE_DEVICE_MAX_BANKS			2080
+#define LCE_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define LCE_DEVICE_BITMAP_SIZE  \
+	LCE_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)
+
+/* QAT GEN_LCE specific macros */
+#define QAT_GEN_LCE_BUNDLE_NUM		LCE_DEVICE_MAX_BANKS
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM	1
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+	uint8_t device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+struct lce_qat_domain {
+	uint32_t nid        :3;
+	uint32_t fid        :7;
+	uint32_t ftype      :2;
+	uint32_t vfid       :13;
+	uint32_t rid        :4;
+	uint32_t vld        :1;
+	uint32_t desc_over  :1;
+	uint32_t pasid_vld  :1;
+	uint32_t pasid      :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id:   20;
+	uint32_t type:      4;
+	uint32_t resv:      8;
+	struct lce_qat_domain dom;
+};
+
+struct qat_dev_gen_lce_extra {
+	struct qat_qp_hw_data
+	    qp_gen_lce_data[QAT_GEN_LCE_BUNDLE_NUM][QAT_GEN4_QPS_PER_BUNDLE_NUM];
+};
+
+static struct qat_pf2vf_dev qat_pf2vf_gen_lce = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static int
+qat_select_valid_queue_gen_lce(struct qat_pci_device *qat_dev, int qp_id,
+			    enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN_LCE_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen_lce_data[i][0].service_type ==
+				service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen_lce(struct qat_pci_device *qat_dev,
+			enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen_lce(qat_dev, qp_id,
+								service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen_lce_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen_lce(struct qat_pci_device *qat_dev,
+			      enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN_LCE_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+static int qat_dev_read_config_gen_lce(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/** Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen_lce_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+static void qat_qp_build_ring_base_gen_lce(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN_LCE(queue->base_phys_addr,
+					       queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN_LCEVF(io_addr, queue->hw_bundle_number,
+				   queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen_lce(const struct qat_queue *txq,
+			   void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+	    (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+	     txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen_lce(const struct qat_queue *txq,
+			    void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+							txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen_lce(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+				     q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+				     q_rx->hw_queue_number, q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen_lce(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen_lce(struct qat_qp *qp, struct qat_queue *q,
+			   uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen_lce(struct qat_pci_device *qat_dev, void *io_addr,
+		      struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen_lce(qp);
+	qat_qp_adf_arb_enable_gen_lce(&qp->tx_q, qp->mmap_bar_addr,
+				   &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen_lce = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen_lce,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen_lce,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen_lce,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen_lce,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen_lce,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen_lce,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen_lce,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen_lce,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen_lce,
+};
+
+static int
+qat_reset_ring_pairs_gen_lce(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static const struct rte_mem_resource*
+qat_dev_get_transport_bar_gen_lce(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen_lce(struct rte_mem_resource **mem_resource,
+			  struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen_lce(void)
+{
+	return sizeof(struct qat_dev_gen_lce_extra);
+}
+
+static int
+qat_dev_get_slice_map_gen_lce(uint32_t *map __rte_unused,
+	const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen_lce = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen_lce,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen_lce,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen_lce,
+	.qat_dev_read_config = qat_dev_read_config_gen_lce,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen_lce,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen_lce,
+};
+
+RTE_INIT(qat_dev_gen_lce_init)
+{
+	qat_qp_hw_spec[QAT_GEN_LCE] = &qat_qp_hw_spec_gen_lce;
+	qat_dev_hw_spec[QAT_GEN_LCE] = &qat_dev_hw_spec_gen_lce;
+	qat_gen_config[QAT_GEN_LCE].dev_gen = QAT_GEN_LCE;
+	qat_gen_config[QAT_GEN_LCE].pf2vf_dev = &qat_pf2vf_gen_lce;
+}
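
As a quick sanity check of the bank-bitmap sizing above, here is a
minimal standalone sketch, assuming an LP64 target where
sizeof(unsigned long) == 8 (so BITS_PER_ULONG == 64):

#include <assert.h>
#include <stdio.h>

/* Restatement of the sizing macros from qat_dev_gen_lce.c above. */
#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
#define LCE_DEVICE_MAX_BANKS	2080
#define LCE_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define LCE_DEVICE_BITMAP_SIZE	\
	LCE_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)

int main(void)
{
	/* 2080 banks packed 64 per unsigned long -> 33 words. */
	printf("bitmap words: %zu\n", (size_t)LCE_DEVICE_BITMAP_SIZE);
	assert(LCE_DEVICE_BITMAP_SIZE == 33);
	return 0;
}
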
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 62abcb6fe3..bc7c3e5b85 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -82,6 +82,7 @@ sources += files(
         'dev/qat_dev_gen2.c',
         'dev/qat_dev_gen3.c',
         'dev/qat_dev_gen4.c',
+        'dev/qat_dev_gen_lce.c',
 )
 includes += include_directories(
         'qat_adf',
@@ -108,6 +109,7 @@ if qat_crypto
             'dev/qat_crypto_pmd_gen2.c',
             'dev/qat_crypto_pmd_gen3.c',
             'dev/qat_crypto_pmd_gen4.c',
+            'dev/qat_crypto_pmd_gen_lce.c',
         ]
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
new file mode 100644
index 0000000000..c9df8f5dd2
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+
+#include "adf_transport_access_macros.h"
+
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+#define ADF_RING_BUNDLE_SIZE_GEN_LCE 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN_LCE 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN_LCE 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN_LCE 0x1080
+
+#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+#define WRITE_CSR_RING_BASE_GEN_LCE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);	\
+	ADF_CSR_WR(csr_base_addr,	\
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),		\
+		l_base);						\
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
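
To see what BUILD_RING_BASE_ADDR_GEN_LCE and the L/U base split in
WRITE_CSR_RING_BASE_GEN_LCE actually compute, here is a minimal
standalone sketch; the DMA address and the ring size code (6, i.e.
ADF_RING_SIZE_4K) are made-up inputs:

#include <assert.h>
#include <stdint.h>

/* Restated from the header above: take the address at 64B granularity
 * (>> 6), clear the low 'size' bits of that granule index so the base
 * is ring-size aligned, then shift back (<< 6). */
#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)

int main(void)
{
	uint64_t base = 0x100002000ULL;	/* hypothetical ring base IOVA */
	uint64_t packed = BUILD_RING_BASE_ADDR_GEN_LCE(base, 6);

	/* Split into the two 32-bit CSR writes (LBASE/UBASE). */
	uint32_t l_base = (uint32_t)(packed & 0xFFFFFFFF);
	uint32_t u_base = (uint32_t)(packed >> 32);

	assert(l_base == 0x00002000 && u_base == 0x1);
	return 0;
}
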
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
new file mode 100644
index 0000000000..3c7232de12
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen_lce.h"
+
+#define ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF 0x0
+
+#define WRITE_CSR_RING_BASE_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),	\
+		l_base);	\
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN_LCEVF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 70f0effa62..215b291b74 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -410,4 +410,18 @@ struct icp_qat_fw_la_cipher_20_req_params {
 	uint8_t    spc_auth_res_sz;
 };
 
+struct icp_qat_fw_la_cipher_30_req_params {
+		uint32_t   spc_aad_sz;
+		uint8_t    cipher_length;
+		uint8_t    reserved[2];
+		uint8_t    spc_auth_res_sz;
+		union {
+				uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+				struct {
+						uint64_t cipher_IV_ptr;
+						uint64_t resrvd1;
+			} s;
+
+		} u;
+};
 #endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index 9411a79301..642e009f28 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -21,6 +21,7 @@ enum qat_device_gen {
 	QAT_GEN2,
 	QAT_GEN3,
 	QAT_GEN4,
+	QAT_GEN_LCE,
 	QAT_N_GENS
 };
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
new file mode 100644
index 0000000000..3f1668b3d3
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding entry %d failed, no empty SGL buffer", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+		nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+	void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx;
+	start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+				QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+				nr, list->buffers[nr].len,
+				list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+static int
+qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+			"GEN_LCE PMD only supports AES-256 AEAD mode",
+			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B of IV can be directly embedded in the descriptor.
+	 * GCM supports only a 12B IV for GEN LCE.
+	 */
+	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.",
+			iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+		rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset),
+		iv_len);
+
+	/* Always SGL */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags &
+		ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* Knowing the digest is contiguous with the cipher-text helps optimize the SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len)
+		== digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC-SGL: 3 entries:
+	 * a) AAD
+	 * b) cipher
+	 * c) digest (only for decrypt, and only when the digest buffer
+	 *    is NOT adjacent)
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr,
+			aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
+				digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST-SGL: 2 entries:
+	 * a) cipher
+	 * b) digest (only for encrypt, and only when the digest buffer
+	 *    is NOT adjacent)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
+				digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+		sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+		rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+		rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+		digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* build request for aead */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		build_request = qat_sym_build_op_aead_gen_lce;
+		ctx->build_request[proc_type] = build_request;
+	}
+	return 0;
+}
+
+
+static int
+qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
+	const char *capa_memz_name,
+	const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
+	uint32_t i;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+			size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	struct rte_cryptodev_capabilities *addr =
+		(struct rte_cryptodev_capabilities *)
+		internals->capa_mz->addr;
+	const struct rte_cryptodev_capabilities *capabilities =
+		qat_sym_crypto_caps_gen_lce;
+	const uint32_t capa_num =
+		size / sizeof(struct rte_cryptodev_capabilities);
+	uint32_t curr_capa = 0;
+
+	for (i = 0; i < capa_num; i++) {
+		memcpy(addr + curr_capa, capabilities + i,
+			sizeof(struct rte_cryptodev_capabilities));
+		curr_capa++;
+	}
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+RTE_INIT(qat_sym_crypto_gen_lce_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities =
+			qat_sym_crypto_cap_get_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session =
+			qat_sym_crypto_set_session_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags =
+			qat_sym_crypto_feature_flags_get_gen1;
+}
+
+RTE_INIT(qat_asym_crypto_gen_lce_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
+}
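
For reference, the SGL building done by qat_sym_build_op_aead_gen_lce()
above can be sketched standalone. This mirrors the append logic of
qat_sgl_add_buffer_gen_lce() and the decrypt-side three-entry source
list (AAD, ciphertext, digest); the struct shapes are simplified and
the addresses are hypothetical:

#include <assert.h>
#include <stdint.h>

#define SGL_MAX 16	/* mirrors QAT_SYM_SGL_MAX_NUMBER */

struct flat_buf { uint32_t len; uint32_t resrvd; uint64_t addr; };
struct sgl { uint32_t num_bufs; struct flat_buf buffers[SGL_MAX]; };

/* Same shape as qat_sgl_add_buffer_gen_lce(): append one flat buffer,
 * fail once the table is full. */
static int
sgl_add(struct sgl *l, uint64_t addr, uint32_t len)
{
	if (l->num_bufs >= SGL_MAX)
		return -1;
	l->buffers[l->num_bufs].len = len;
	l->buffers[l->num_bufs].resrvd = 0;
	l->buffers[l->num_bufs].addr = addr;
	l->num_bufs++;
	return 0;
}

int main(void)
{
	struct sgl src = { .num_bufs = 0 };

	/* Decrypt with a non-adjacent digest: AAD, payload, digest. */
	assert(sgl_add(&src, 0x1000, 64) == 0);	/* AAD */
	assert(sgl_add(&src, 0x2000, 64) == 0);	/* ciphertext */
	assert(sgl_add(&src, 0x3000, 16) == 0);	/* digest */
	assert(src.num_bufs == 3);
	return 0;
}
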
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f976009bf2..f2f197d050 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
 
+/* Digest length for GCM Algo is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for GCM algo is 12 bytes */
+#define GCM_IV_LENGTH_GEN_LCE 12
+
 struct qat_sym_session;
 
 struct qat_sym_sgl {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v5 2/4] common/qat: update common driver to support GEN LCE
  2024-02-27 11:33 ` [PATCH v5 " Nishikant Nayak
  2024-02-27 11:33   ` [PATCH v5 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
@ 2024-02-27 11:33   ` Nishikant Nayak
  2024-02-27 11:33   ` [PATCH v5 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
  2024-02-27 11:33   ` [PATCH v5 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27 11:33 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

Adding GEN LCE specific macros which are required for updating
the support for GEN LCE features.
This patch also adds other macros which are used by the
GEN LCE specific APIs.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Fixed code formatting
---
---
 .../qat/qat_adf/adf_transport_access_macros.h |  1 +
 drivers/common/qat/qat_adf/icp_qat_fw.h       | 34 ++++++++++++++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    | 45 ++++++++++++++++++-
 drivers/common/qat/qat_device.c               |  9 ++++
 4 files changed, 88 insertions(+), 1 deletion(-)

diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 12a7258c60..19bd812419 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -47,6 +47,7 @@
 #define ADF_RING_SIZE_512 0x03
 #define ADF_RING_SIZE_4K 0x06
 #define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_64K 0x0A
 #define ADF_RING_SIZE_4M 0x10
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index 3aa17ae041..b78158e01d 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -57,6 +57,12 @@ struct icp_qat_fw_comn_req_hdr_cd_pars {
 	} u;
 };
 
+struct lce_key_buff_desc {
+	uint64_t keybuff;
+	uint32_t keybuff_resrvd1;
+	uint32_t keybuff_resrvd2;
+};
+
 struct icp_qat_fw_comn_req_mid {
 	uint64_t opaque_data;
 	uint64_t src_data_addr;
@@ -123,6 +129,12 @@ struct icp_qat_fw_comn_resp {
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
 
+/* GEN_LCE specific Common Header fields */
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
+#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3
+#define ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR 0
+
 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
 	icp_qat_fw_comn_req_hdr_t.service_type
 
@@ -168,6 +180,12 @@ struct icp_qat_fw_comn_resp {
 	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
 	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
 
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
+	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
+	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
+	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))
+
 #define QAT_COMN_PTR_TYPE_BITPOS 0
 #define QAT_COMN_PTR_TYPE_MASK 0x1
 #define QAT_COMN_CD_FLD_TYPE_BITPOS 1
@@ -180,10 +198,20 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_EXT_FLAGS_MASK 0x1
 #define QAT_COMN_EXT_FLAGS_USED 0x1
 
+/* GEN_LCE specific Common Request Flags fields */
+#define QAT_COMN_KEYBUF_USAGE_BITPOS 1
+#define QAT_COMN_KEYBUF_USAGE_MASK 0x1
+#define QAT_COMN_KEY_BUFFER_USED 1
+
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
 	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
 
+#define ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(ptr, keybuf) \
+	((((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \
+	 (((keybuf) & QAT_COMN_PTR_TYPE_MASK) << \
+	   QAT_COMN_KEYBUF_USAGE_BITPOS))
+
 #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
 	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
 
@@ -249,6 +277,8 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
+#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1
 #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
 #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
 
@@ -280,6 +310,10 @@ struct icp_qat_fw_comn_resp {
 	QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
 	QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
 
+#define ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_INVALID_PARAM_BITPOS, \
+	QAT_COMN_RESP_INVALID_PARAM_MASK)
+
 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
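
A worked sketch of ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE, as used
later when initialising the GEN_LCE session header. The VALID flag
position (bit 7) is restated from the pre-existing icp_qat_fw.h and is
an assumption of this sketch:

#include <assert.h>
#include <stdint.h>

#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7	/* from base header */
#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3

#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))

int main(void)
{
	uint8_t hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(1,
			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);

	/* valid bit (0x80) | descriptor layout 3 at bits 6:5 (0x60) */
	assert(hdr_flags == 0xE0);
	return 0;
}
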
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 215b291b74..eba9f96685 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -22,14 +22,24 @@ enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };
 
+/* In GEN_LCE Command ID 4 corresponds to AEAD */
+#define ICP_QAT_FW_LA_CMD_AEAD 4
+
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 
+/* GEN_LCE Hash, HMAC and GCM Verification Status */
+#define ICP_QAT_FW_LA_VER_STATUS_FAIL ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR
+
+
 struct icp_qat_fw_la_bulk_req {
 	struct icp_qat_fw_comn_req_hdr comn_hdr;
-	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	union {
+		struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+		struct lce_key_buff_desc key_buff;
+	};
 	struct icp_qat_fw_comn_req_mid comn_mid;
 	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
 	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
@@ -81,6 +91,21 @@ struct icp_qat_fw_la_bulk_req {
 #define ICP_QAT_FW_LA_PARTIAL_END 2
 #define QAT_LA_PARTIAL_BITPOS 0
 #define QAT_LA_PARTIAL_MASK 0x3
+
+/* GEN_LCE specific Crypto Flags fields */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
+#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
+#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
+#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
+#define ICP_QAT_FW_SYM_IV_IN_DESC_VALID 1
+#define ICP_QAT_FW_SYM_DIRECTION_BITPOS 15
+#define ICP_QAT_FW_SYM_DIRECTION_MASK 0x1
+
+/* In GEN_LCE AEAD AES GCM Algorithm has ID 0 */
+#define QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE 0
+
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -188,6 +213,23 @@ struct icp_qat_fw_la_bulk_req {
 	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
 	QAT_LA_PARTIAL_MASK)
 
+/* GEN_LCE specific Crypto Flags operations */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS, \
+	ICP_QAT_FW_SYM_AEAD_ALGO_MASK)
+
+#define ICP_QAT_FW_SYM_IV_SIZE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_SIZE_BITPOS, \
+	ICP_QAT_FW_SYM_IV_SIZE_MASK)
+
+#define ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS, \
+	ICP_QAT_FW_SYM_IV_IN_DESC_MASK)
+
+#define ICP_QAT_FW_SYM_DIR_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_DIRECTION_BITPOS, \
+	ICP_QAT_FW_SYM_DIRECTION_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
@@ -424,4 +466,5 @@ struct icp_qat_fw_la_cipher_30_req_params {
 
 		} u;
 };
+
 #endif
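
A worked sketch of the GEN_LCE serv_specif_flags packing that
qat_sym_session_init_gen_lce_hdr() performs with the setters above.
QAT_FIELD_SET and the 12-octet GCM IV length code (1) are restated
from the pre-existing QAT headers and are assumptions of this sketch:

#include <assert.h>
#include <stdint.h>

#define QAT_FIELD_SET(flags, val, bitpos, mask) \
	((flags) = (((flags) & (~((mask) << (bitpos)))) | \
		(((val) & (mask)) << (bitpos))))

#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
#define QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE 0
#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1	/* from base header */

int main(void)
{
	uint16_t flags = 0;

	/* AES-GCM (algo 0), 12B IV (code 1), IV carried in descriptor. */
	QAT_FIELD_SET(flags, QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE,
			ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS,
			ICP_QAT_FW_SYM_AEAD_ALGO_MASK);
	QAT_FIELD_SET(flags, ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS,
			ICP_QAT_FW_SYM_IV_SIZE_BITPOS,
			ICP_QAT_FW_SYM_IV_SIZE_MASK);
	QAT_FIELD_SET(flags, 1, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS,
			ICP_QAT_FW_SYM_IV_IN_DESC_MASK);

	assert(flags == 0xA00);	/* 0x200 (IV size) | 0x800 (IV in desc) */
	return 0;
}
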
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index f55dc3c6f0..18e652e393 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -62,6 +62,12 @@ static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x4945),
 		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1454),
+		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1456),
+		},
 		{.device_id = 0},
 };
 
@@ -199,6 +205,9 @@ pick_gen(const struct rte_pci_device *pci_dev)
 	case 0x4943:
 	case 0x4945:
 		return QAT_GEN4;
+	case 0x1454:
+	case 0x1456:
+		return QAT_GEN_LCE;
 	default:
 		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
 		return QAT_N_GENS;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v5 3/4] crypto/qat: update headers for GEN LCE support
  2024-02-27 11:33 ` [PATCH v5 " Nishikant Nayak
  2024-02-27 11:33   ` [PATCH v5 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
  2024-02-27 11:33   ` [PATCH v5 2/4] common/qat: update common driver to support " Nishikant Nayak
@ 2024-02-27 11:33   ` Nishikant Nayak
  2024-02-27 11:33   ` [PATCH v5 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27 11:33 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patch handles the changes required for updating the common
header fields specific to GEN LCE. It also adds/updates the response
processing APIs based on the GEN LCE requirements.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Added GEN LCE specific API for deque burst.
    - Fixed code formatting.
---
---
 drivers/crypto/qat/qat_sym.c         | 16 ++++++-
 drivers/crypto/qat/qat_sym.h         | 60 ++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 62 +++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.h | 10 ++++-
 4 files changed, 140 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..439a3fc00b 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -180,7 +180,15 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
+			qat_sym_process_response, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+							uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+			qat_sym_process_response_gen_lce, nb_ops);
 }
 
 int
@@ -200,6 +208,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct rte_cryptodev *cryptodev;
 	struct qat_cryptodev_private *internals;
+	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
 	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
 		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 
@@ -249,7 +258,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
 	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+	if (qat_dev_gen == QAT_GEN_LCE)
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
+	else
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
 	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f2f197d050..3461113c13 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -90,7 +90,7 @@
 /*
  * Maximum number of SGL entries
  */
-#define QAT_SYM_SGL_MAX_NUMBER	16
+#define QAT_SYM_SGL_MAX_NUMBER 16
 
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
@@ -142,6 +142,10 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 #ifdef RTE_QAT_OPENSSL
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -390,6 +394,52 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen_lce(void **op, uint8_t *resp,
+	void *op_cookie __rte_unused,
+	uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+		(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+		(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+		sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+				resp_msg->comn_hdr.comn_status))
+			rx_op->status =	RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+
+	*op = (void *)rx_op;
+
+	/*
+	 * Return 1, as the dequeue op only moves on to the next op
+	 * if one was ready to return to the API.
+	 */
+	return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -455,7 +505,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-	void *op_cookie __rte_unused)
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
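
The error mapping in qat_sym_process_response_gen_lce() above reduces
to simple bit tests on comn_status. A minimal standalone sketch, with
QAT_FIELD_GET restated from the base icp_qat_fw.h as an assumption:

#include <assert.h>
#include <stdint.h>

#define QAT_FIELD_GET(flags, bitpos, mask) \
	(((flags) >> (bitpos)) & (mask))	/* assumed shape */

#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1

int main(void)
{
	/* Hypothetical status byte with only the invalid-param bit set;
	 * the response path maps this to RTE_CRYPTO_OP_STATUS_INVALID_ARGS. */
	uint8_t comn_status = 1 << QAT_COMN_RESP_INVALID_PARAM_BITPOS;

	assert(QAT_FIELD_GET(comn_status,
			QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS,
			QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK) == 0);
	assert(QAT_FIELD_GET(comn_status,
			QAT_COMN_RESP_INVALID_PARAM_BITPOS,
			QAT_COMN_RESP_INVALID_PARAM_MASK) == 1);
	return 0;
}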
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9f4f6c3d93..8f50b61365 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		session->qat_cmd);
 		return -ENOTSUP;
 	}
+
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		qat_sym_session_init_gen_lce_hdr(session);
+		return 0;
+	}
+
 	qat_sym_session_finalize(session);
 
 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1016,6 +1025,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 			dev->data->dev_private;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
+
+		key_buff->keybuff = session->key_paddr;
+	}
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -1079,9 +1094,15 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	}
 
 	if (session->is_single_pass) {
-		if (qat_sym_cd_cipher_set(session,
+		if (qat_dev_gen != QAT_GEN_LCE) {
+			if (qat_sym_cd_cipher_set(session,
 				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
+				return -EINVAL;
+		} else {
+			session->auth_key_length = aead_xform->key.length;
+			memcpy(session->key_array, aead_xform->key.data,
+							aead_xform->key.length);
+		}
 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1970,6 +1991,43 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
+{
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+	/*
+	 * GEN_LCE specifies a separate command ID for AEAD operations, but the
+	 * Cryptodev API processes AEAD operations as single-pass crypto
+	 * operations. Hence, even for GEN_LCE, the session algo command ID is
+	 * CIPHER; note, however, that the session algo mode is AEAD.
+	 */
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->hdr_flags =
+	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
+			QAT_COMN_KEY_BUFFER_USED);
+
+	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
+		QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
+	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
+		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+		ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_DECRYPT);
+	} else {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_ENCRYPT);
+	}
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9209e2e8df..958af03405 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,10 +111,16 @@ struct qat_sym_session {
 	enum icp_qat_hw_auth_op auth_op;
 	enum icp_qat_hw_auth_mode auth_mode;
 	void *bpi_ctx;
-	struct qat_sym_cd cd;
+	union {
+		struct qat_sym_cd cd;
+		uint8_t key_array[32];
+	};
 	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
 	uint8_t *cd_cur_ptr;
-	phys_addr_t cd_paddr;
+	union {
+		phys_addr_t cd_paddr;
+		phys_addr_t key_paddr;
+	};
 	phys_addr_t prefix_paddr;
 	struct icp_qat_fw_la_bulk_req fw_req;
 	uint8_t aad_len;
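
The unions above let GEN LCE reuse the content-descriptor storage and
its physical address for the raw AES-256 key, which is what
qat_sym_session_configure_aead() relies on when it copies the key into
key_array and programs key_paddr into the key buffer descriptor. A
minimal sketch of that aliasing, with the struct shape simplified and
phys_addr_t assumed to be 64-bit:

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef uint64_t phys_addr_t;	/* assumption for this sketch */

struct sess_sketch {
	union {
		uint8_t cd[64];		/* stand-in for struct qat_sym_cd */
		uint8_t key_array[32];
	};
	union {
		phys_addr_t cd_paddr;
		phys_addr_t key_paddr;
	};
};

int main(void)
{
	struct sess_sketch s;

	/* GEN_LCE path: the AEAD key lands where the CD would live, and
	 * the same IOVA is published under the key_paddr alias. */
	memset(s.key_array, 0xAB, sizeof(s.key_array));
	s.cd_paddr = 0x1000;

	assert(s.key_paddr == 0x1000);	/* one address, two views */
	assert(s.cd[0] == 0xAB);	/* one buffer, two views */
	return 0;
}
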
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v5 4/4] test/cryptodev: add tests for GCM with AAD
  2024-02-27 11:33 ` [PATCH v5 " Nishikant Nayak
                     ` (2 preceding siblings ...)
  2024-02-27 11:33   ` [PATCH v5 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
@ 2024-02-27 11:33   ` Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-27 11:33 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Akhil Goyal, Fan Zhang

Adding one new unit test for validating the features
added as part of GCM with 64 byte AAD.
The new test case covers the GCM algo for both
encrypt and decrypt operations.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
---
v2:
    - Removed unused code.
    - Added one new unit test, AAD with GCM for GEN LCE.
---
---
 app/test/test_cryptodev.c                   | 48 +++++++++++++---
 app/test/test_cryptodev_aead_test_vectors.h | 62 +++++++++++++++++++++
 2 files changed, 103 insertions(+), 7 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 38a65aa88f..edd23731f7 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -12494,6 +12494,18 @@ test_AES_GCM_auth_decryption_test_case_256_7(void)
 	return test_authenticated_decryption(&gcm_test_case_256_7);
 }
 
+static int
+test_AES_GCM_auth_decryption_test_case_256_8(void)
+{
+	return test_authenticated_decryption(&gcm_test_case_256_8);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_8(void)
+{
+	return test_authenticated_encryption(&gcm_test_case_256_8);
+}
+
 static int
 test_AES_GCM_auth_decryption_test_case_aad_1(void)
 {
@@ -12613,10 +12625,16 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
@@ -12719,16 +12737,22 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
-		return TEST_SKIPPED;
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
 
 	/* not supported with CPU crypto and raw data-path APIs */
 	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO ||
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
+		return TEST_SKIPPED;
 
 	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
 			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
@@ -15749,10 +15773,16 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/*
@@ -17392,6 +17422,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_encryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_encryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encryption_test_case_256_8),
 
 		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
@@ -17408,6 +17440,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_decryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_decryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_decryption_test_case_256_8),
 
 		/** AES GCM Authenticated Encryption big aad size */
 		TEST_CASE_ST(ut_setup, ut_teardown,
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 07292620a4..eadf206e4d 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -17,6 +17,16 @@ static uint8_t gcm_aad_text[MAX_AAD_LENGTH] = {
 		0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
 		0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
 
+static uint8_t gcm_aad_64B_text[MAX_AAD_LENGTH] = {
+		0xED, 0x3E, 0xA8, 0x1F, 0x74, 0xE5, 0xD1, 0x96,
+		0xA4, 0xD5, 0x4B, 0x26, 0xBB, 0x20, 0x61, 0x7B,
+		0x3B, 0x9C, 0x2A, 0x69, 0x90, 0xEF, 0xD7, 0x9A,
+		0x94, 0xC2, 0xF5, 0x86, 0xBD, 0x00, 0xF6, 0xEA,
+		0x0B, 0x14, 0x24, 0xF2, 0x08, 0x67, 0x42, 0x3A,
+		0xB5, 0xB8, 0x32, 0x97, 0xB5, 0x99, 0x69, 0x75,
+		0x60, 0x00, 0x8F, 0xF7, 0x6F, 0x16, 0x52, 0x66,
+		0xF1, 0xA9, 0x38, 0xFD, 0xB0, 0x61, 0x60, 0xB5 };
+
 static uint8_t ccm_aad_test_1[8] = {
 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
 };
@@ -1736,6 +1746,58 @@ static const struct aead_test_data gcm_test_case_256_7 = {
 	}
 };
 
+static const struct aead_test_data gcm_test_case_256_8 = {
+	.algo = RTE_CRYPTO_AEAD_AES_GCM,
+	.key = {
+		.data = {
+			0xD8, 0xFD, 0x8F, 0x5A, 0x13, 0x7B, 0x05, 0x2C,
+			0xA4, 0x64, 0x7A, 0xDD, 0x1E, 0x9A, 0x68, 0x33,
+			0x04, 0x70, 0xE8, 0x1E, 0x42, 0x84, 0x64, 0xD2,
+			0x23, 0xA1, 0x6A, 0x0A, 0x05, 0x7B, 0x90, 0xDE},
+		.len = 32
+	},
+	.iv = {
+		.data = {
+			0x8D, 0xDF, 0xB8, 0x7F, 0xD0, 0x79, 0x77, 0x55,
+			0xD5, 0x48, 0x03, 0x05},
+		.len = 12
+	},
+	.aad = {
+		.data = gcm_aad_64B_text,
+		.len = 64
+	},
+	.plaintext = {
+		.data = {
+			0x4D, 0xBC, 0x2C, 0x7F, 0x25, 0x1F, 0x07, 0x25,
+			0x54, 0x8C, 0x43, 0xDB, 0xD8, 0x06, 0x9F, 0xBF,
+			0xCA, 0x60, 0xF4, 0xEF, 0x13, 0x87, 0xE8, 0x2F,
+			0x4D, 0x9D, 0x1D, 0x87, 0x9F, 0x91, 0x79, 0x7E,
+			0x3E, 0x98, 0xA3, 0x63, 0xC6, 0xFE, 0xDB, 0x35,
+			0x96, 0x59, 0xB2, 0x0C, 0x80, 0x96, 0x70, 0x07,
+			0x87, 0x42, 0xAB, 0x4F, 0x31, 0x73, 0xC4, 0xF9,
+			0xB0, 0x1E, 0xF1, 0xBC, 0x7D, 0x45, 0xE5, 0xF3},
+		.len = 64
+	},
+	.ciphertext = {
+	    .data = {
+			0x21, 0xFA, 0x59, 0x4F, 0x1F, 0x6B, 0x19, 0xC2,
+			0x68, 0xBC, 0x05, 0x93, 0x4E, 0x48, 0x6C, 0x5B,
+			0x0B, 0x7A, 0x43, 0xB7, 0x60, 0x8E, 0x00, 0xC4,
+			0xAB, 0x14, 0x6B, 0xCC, 0xA1, 0x27, 0x6A, 0xDE,
+			0x8E, 0xB6, 0x98, 0xBB, 0x4F, 0xD0, 0x6F, 0x30,
+			0x0F, 0x04, 0xA8, 0x5B, 0xDC, 0xD8, 0xE8, 0x8A,
+			0x73, 0xD9, 0xB8, 0x60, 0x7C, 0xE4, 0x32, 0x4C,
+			0x3A, 0x0B, 0xC2, 0x82, 0xDA, 0x88, 0x17, 0x69},
+	    .len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x3B, 0x80, 0x83, 0x72, 0xE5, 0x1B, 0x94, 0x15,
+			0x75, 0xC8, 0x62, 0xBC, 0xA1, 0x66, 0x91, 0x45},
+		.len = 16
+	}
+};
+
 /** variable AAD AES-GCM-128 Test Vectors */
 static const struct aead_test_data gcm_test_case_aad_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v6 0/4] add QAT GEN LCE device
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
                   ` (7 preceding siblings ...)
  2024-02-27 11:33 ` [PATCH v5 " Nishikant Nayak
@ 2024-02-28 14:00 ` Nishikant Nayak
  2024-02-28 14:00   ` [PATCH v6 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
                     ` (3 more replies)
  2024-02-29 18:43 ` [PATCH v7 0/3] add QAT GEN LCE device Ciara Power
  2024-02-29 19:45 ` [PATCH v8 0/3] add QAT GEN LCE device Ciara Power
  10 siblings, 4 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-28 14:00 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patchset adds a new QAT LCE device.
The device currently only supports symmetric crypto,
and only the AES-GCM algorithm.
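
For orientation, a minimal sketch (not part of the series, assumptions
noted in the comments) of an AEAD transform matching the capability the
new PMD advertises: 256-bit key, 12-byte IV, 16-byte digest.

#include <string.h>
#include <rte_crypto.h>

/* Hedged sketch: fill an AES-256-GCM transform within the GEN LCE
 * limits (key 32B, IV 12B, digest 16B, AAD up to 240B). "key" and
 * "aad_len" are illustrative caller-provided values; the IV offset
 * is a typical choice, not mandated by the PMD.
 */
static void
fill_gcm_xform(struct rte_crypto_sym_xform *xform,
		const uint8_t key[32], uint16_t aad_len)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.key.data = key;
	xform->aead.key.length = 32;
	xform->aead.iv.offset = sizeof(struct rte_crypto_op);
	xform->aead.iv.length = 12;
	xform->aead.digest_length = 16;
	xform->aead.aad_length = aad_len;
}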

v6:
  - Added documentation and release note changes.
  - Removed unused device PCI ID.
v5:
  - Fixed compilation issue by replacing __u8 with uint8_t.
v4:
  - Fixed cover letter, v3 included the wrong details relating
    to another patchset.
v3:
  - Fixed typos in commit and code comments.
  - Replaced use of linux/kernel.h macro with local macro
    to fix ARM compilation in CI.
v2:
   - Renamed device from GEN 5 to GEN LCE.
   - Removed unused code.
   - Updated macro names.

Nishikant Nayak (4):
  common/qat: add files specific to GEN LCE
  common/qat: update common driver to support GEN LCE
  crypto/qat: update headers for GEN LCE support
  test/cryptodev: add tests for GCM with AAD

 .mailmap                                      |   1 +
 app/test/test_cryptodev.c                     |  48 ++-
 app/test/test_cryptodev_aead_test_vectors.h   |  62 ++++
 doc/guides/cryptodevs/qat.rst                 |   1 +
 doc/guides/rel_notes/release_24_03.rst        |   4 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   5 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.c                  |  16 +-
 drivers/crypto/qat/qat_sym.h                  |  66 +++-
 drivers/crypto/qat/qat_sym_session.c          |  62 +++-
 drivers/crypto/qat/qat_sym_session.h          |  10 +-
 19 files changed, 1090 insertions(+), 16 deletions(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v6 1/4] common/qat: add files specific to GEN LCE
  2024-02-28 14:00 ` [PATCH v6 0/4] add QAT GEN LCE device Nishikant Nayak
@ 2024-02-28 14:00   ` Nishikant Nayak
  2024-02-29 16:09     ` [EXT] " Akhil Goyal
  2024-02-29 16:14     ` Akhil Goyal
  2024-02-28 14:00   ` [PATCH v6 2/4] common/qat: update common driver to support " Nishikant Nayak
                     ` (2 subsequent siblings)
  3 siblings, 2 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-28 14:00 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Thomas Monjalon, Anatoly Burakov

Adding GEN LCE files for handling GEN LCE specific operations.
These files are inherited from the existing files/APIs, with
some changes specific to GEN LCE requirements.
Also updated the mailmap file.
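
As a quick aid, the ring-base encoding introduced by the new transport
macros header can be sanity-checked standalone; a worked sketch, using
the macro body exactly as added in the diff below:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from adf_transport_access_macros_gen_lce.h below: keep the
 * base 64B-aligned and clear the low "size" bits of the 64B-granule
 * index, so the ring base is naturally aligned to the ring size.
 */
#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)

int main(void)
{
	uint64_t base = 0x12345a40;	/* illustrative physical address */

	/* size 6 -> alignment 64B << 6 = 4KiB; prints 0x12345000 */
	printf("0x%" PRIx64 "\n", BUILD_RING_BASE_ADDR_GEN_LCE(base, 6));
	return 0;
}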

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
---
v5:
    - Replaced usage of __u8 with uint8_t.
v3:
    - Removed use of linux/kernel.h macro to fix ARM compilation.
    - Fixed typo in commit body and code comment.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
---
 .mailmap                                      |   1 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 306 ++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  14 +
 drivers/common/qat/qat_common.h               |   1 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 329 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.h                  |   6 +
 9 files changed, 758 insertions(+)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

diff --git a/.mailmap b/.mailmap
index 58cca13ef6..8008e5a899 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1036,6 +1036,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>
 Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/drivers/common/qat/dev/qat_dev_gen_lce.c b/drivers/common/qat/dev/qat_dev_gen_lce.c
new file mode 100644
index 0000000000..5faaefc2d8
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen_lce.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen_lcevf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
+
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX	VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 4)
+#define LCE_DEVICE_NAME_SIZE			64
+#define LCE_DEVICE_MAX_BANKS			2080
+#define LCE_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define LCE_DEVICE_BITMAP_SIZE  \
+	LCE_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)
+
+/* QAT GEN_LCE specific macros */
+#define QAT_GEN_LCE_BUNDLE_NUM		LCE_DEVICE_MAX_BANKS
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM	1
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+	uint8_t device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+struct lce_qat_domain {
+	uint32_t nid        :3;
+	uint32_t fid        :7;
+	uint32_t ftype      :2;
+	uint32_t vfid       :13;
+	uint32_t rid        :4;
+	uint32_t vld        :1;
+	uint32_t desc_over  :1;
+	uint32_t pasid_vld  :1;
+	uint32_t pasid      :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id:   20;
+	uint32_t type:      4;
+	uint32_t resv:      8;
+	struct lce_qat_domain dom;
+};
+
+struct qat_dev_gen_lce_extra {
+	struct qat_qp_hw_data
+	    qp_gen_lce_data[QAT_GEN_LCE_BUNDLE_NUM][QAT_GEN4_QPS_PER_BUNDLE_NUM];
+};
+
+static struct qat_pf2vf_dev qat_pf2vf_gen_lce = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static int
+qat_select_valid_queue_gen_lce(struct qat_pci_device *qat_dev, int qp_id,
+			    enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN_LCE_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen_lce_data[i][0].service_type ==
+				service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen_lce(struct qat_pci_device *qat_dev,
+			enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen_lce(qat_dev, qp_id,
+								service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen_lce_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen_lce(struct qat_pci_device *qat_dev,
+			      enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN_LCE_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+static int qat_dev_read_config_gen_lce(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/** Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen_lce_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+static void qat_qp_build_ring_base_gen_lce(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN_LCE(queue->base_phys_addr,
+					       queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN_LCEVF(io_addr, queue->hw_bundle_number,
+				   queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen_lce(const struct qat_queue *txq,
+			   void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+	    (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+	     txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen_lce(const struct qat_queue *txq,
+			    void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+							txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			   arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen_lce(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+				     q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+				     q_rx->hw_queue_number, q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen_lce(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen_lce(struct qat_qp *qp, struct qat_queue *q,
+			   uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen_lce(struct qat_pci_device *qat_dev, void *io_addr,
+		      struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen_lce(qp);
+	qat_qp_adf_arb_enable_gen_lce(&qp->tx_q, qp->mmap_bar_addr,
+				   &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen_lce = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen_lce,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen_lce,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen_lce,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen_lce,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen_lce,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen_lce,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen_lce,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen_lce,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen_lce,
+};
+
+static int
+qat_reset_ring_pairs_gen_lce(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static const struct rte_mem_resource*
+qat_dev_get_transport_bar_gen_lce(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen_lce(struct rte_mem_resource **mem_resource,
+			  struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen_lce(void)
+{
+	return sizeof(struct qat_dev_gen_lce_extra);
+}
+
+static int
+qat_dev_get_slice_map_gen_lce(uint32_t *map __rte_unused,
+	const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen_lce = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen_lce,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen_lce,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen_lce,
+	.qat_dev_read_config = qat_dev_read_config_gen_lce,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen_lce,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen_lce,
+};
+
+RTE_INIT(qat_dev_gen_lce_init)
+{
+	qat_qp_hw_spec[QAT_GEN_LCE] = &qat_qp_hw_spec_gen_lce;
+	qat_dev_hw_spec[QAT_GEN_LCE] = &qat_dev_hw_spec_gen_lce;
+	qat_gen_config[QAT_GEN_LCE].dev_gen = QAT_GEN_LCE;
+	qat_gen_config[QAT_GEN_LCE].pf2vf_dev = &qat_pf2vf_gen_lce;
+}
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 62abcb6fe3..bc7c3e5b85 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -82,6 +82,7 @@ sources += files(
         'dev/qat_dev_gen2.c',
         'dev/qat_dev_gen3.c',
         'dev/qat_dev_gen4.c',
+        'dev/qat_dev_gen_lce.c',
 )
 includes += include_directories(
         'qat_adf',
@@ -108,6 +109,7 @@ if qat_crypto
             'dev/qat_crypto_pmd_gen2.c',
             'dev/qat_crypto_pmd_gen3.c',
             'dev/qat_crypto_pmd_gen4.c',
+            'dev/qat_crypto_pmd_gen_lce.c',
         ]
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
new file mode 100644
index 0000000000..c9df8f5dd2
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+
+#include "adf_transport_access_macros.h"
+
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+#define ADF_RING_BUNDLE_SIZE_GEN_LCE 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN_LCE 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN_LCE 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN_LCE 0x1080
+
+#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+#define WRITE_CSR_RING_BASE_GEN_LCE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);	\
+	ADF_CSR_WR(csr_base_addr,	\
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),		\
+		l_base);						\
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
new file mode 100644
index 0000000000..3c7232de12
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen_lce.h"
+
+#define ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF 0x0
+
+#define WRITE_CSR_RING_BASE_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),	\
+		l_base);	\
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN_LCEVF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 70f0effa62..215b291b74 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -410,4 +410,18 @@ struct icp_qat_fw_la_cipher_20_req_params {
 	uint8_t    spc_auth_res_sz;
 };
 
+struct icp_qat_fw_la_cipher_30_req_params {
+	uint32_t   spc_aad_sz;
+	uint8_t    cipher_length;
+	uint8_t    reserved[2];
+	uint8_t    spc_auth_res_sz;
+	union {
+		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint64_t cipher_IV_ptr;
+			uint64_t resrvd1;
+		} s;
+
+	} u;
+};
 #endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index 9411a79301..642e009f28 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -21,6 +21,7 @@ enum qat_device_gen {
 	QAT_GEN2,
 	QAT_GEN3,
 	QAT_GEN4,
+	QAT_GEN_LCE,
 	QAT_N_GENS
 };
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
new file mode 100644
index 0000000000..3f1668b3d3
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding entry %d failed, no empty SGL buffer", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+		nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+	void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx;
+	start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+				QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+				nr, list->buffers[nr].len,
+				list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+static int
+qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+			"GEN_LCE PMD only supports AES-256 AEAD mode",
+			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B of IV can be directly embedded in the descriptor,
+	 * but GCM supports only a 12B IV on GEN LCE.
+	 */
+	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.",
+			iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+		rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset),
+		iv_len);
+
+	/* Always SGL */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags &
+		ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* Knowing the digest is contiguous with the ciphertext helps optimize the SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len)
+		== digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC-SGL: up to 3 entries:
+	 * a) AAD
+	 * b) cipher
+	 * c) digest (only for decrypt, and only when the digest
+	 *    buffer is not adjacent to the ciphertext)
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr,
+			aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_src,
+			cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
+				digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST-SGL: 2 entries:
+	 * a) cipher
+	 * b) digest (only for encrypt, when the buffer is not adjacent)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+			&cookie->qat_sgl_dst,
+			cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
+				digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+		sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+		rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+		rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+		digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* build request for aead */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		build_request = qat_sym_build_op_aead_gen_lce;
+		ctx->build_request[proc_type] = build_request;
+	}
+	return 0;
+}
+
+
+static int
+qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
+	const char *capa_memz_name,
+	const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
+	uint32_t i;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+			size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	struct rte_cryptodev_capabilities *addr =
+		(struct rte_cryptodev_capabilities *)
+		internals->capa_mz->addr;
+	const struct rte_cryptodev_capabilities *capabilities =
+		qat_sym_crypto_caps_gen_lce;
+	const uint32_t capa_num =
+		size / sizeof(struct rte_cryptodev_capabilities);
+	uint32_t curr_capa = 0;
+
+	for (i = 0; i < capa_num; i++) {
+		memcpy(addr + curr_capa, capabilities + i,
+			sizeof(struct rte_cryptodev_capabilities));
+		curr_capa++;
+	}
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+RTE_INIT(qat_sym_crypto_gen_lce_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities =
+			qat_sym_crypto_cap_get_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session =
+			qat_sym_crypto_set_session_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags =
+			qat_sym_crypto_feature_flags_get_gen1;
+}
+
+RTE_INIT(qat_asym_crypto_gen_lce_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f976009bf2..f2f197d050 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
 
+/* Digest length for GCM Algo is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for GCM algo is 12 bytes */
+#define GCM_IV_LENGTH_GEN_LCE 12
+
 struct qat_sym_session;
 
 struct qat_sym_sgl {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v6 2/4] common/qat: update common driver to support GEN LCE
  2024-02-28 14:00 ` [PATCH v6 0/4] add QAT GEN LCE device Nishikant Nayak
  2024-02-28 14:00   ` [PATCH v6 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
@ 2024-02-28 14:00   ` Nishikant Nayak
  2024-02-28 14:00   ` [PATCH v6 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
  2024-02-28 14:00   ` [PATCH v6 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
  3 siblings, 0 replies; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-28 14:00 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

Adding GEN LCE specific macros required to support
GEN LCE features.
This patch also adds other macros used by GEN LCE
specific APIs.
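
A worked example of the new header-flags builder may help: the valid
flag sits at the pre-existing ICP_QAT_FW_COMN_VALID_FLAG_BITPOS (7) and
the new 2-bit descriptor layout field at bits 6:5, so:

/* Hedged sketch: with valid = ICP_QAT_FW_COMN_REQ_FLAG_SET (1) and
 * desc_layout = ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT (3), the macro
 * yields (1 << 7) | (3 << 5) = 0x80 | 0x60 = 0xE0.
 */
uint8_t hdr_flags =
	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
		ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);	/* == 0xE0 */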

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
---
v6:
    - Removed unused PCI device IDs from the device list.
    - Updated documentation and release note.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Fixed code formatting
---
 doc/guides/cryptodevs/qat.rst                 |  1 +
 doc/guides/rel_notes/release_24_03.rst        |  4 ++
 .../qat/qat_adf/adf_transport_access_macros.h |  1 +
 drivers/common/qat/qat_adf/icp_qat_fw.h       | 34 ++++++++++++++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    | 45 ++++++++++++++++++-
 drivers/common/qat/qat_device.c               |  5 +++
 6 files changed, 89 insertions(+), 1 deletion(-)

diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index dc6b95165d..d9adbfc71e 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -26,6 +26,7 @@ poll mode crypto driver support for the following hardware accelerator devices:
 * ``Intel QuickAssist Technology D15xx``
 * ``Intel QuickAssist Technology C4xxx``
 * ``Intel QuickAssist Technology 4xxx``
+* ``Intel QuickAssist Technology apfxx``
 
 
 Features
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 879bb4944c..41dccbb0c1 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -131,6 +131,10 @@ New Features
   * Added support for comparing result between packet fields or value.
   * Added support for accumulating value of field into another one.
 
+* **Updated Intel QuickAssist Technology driver.**
+
+  * Added support for GEN LCE (1454) device, for AES-GCM only.
+
 * **Updated Marvell cnxk crypto driver.**
 
   * Added support for Rx inject in crypto_cn10k.
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 12a7258c60..19bd812419 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -47,6 +47,7 @@
 #define ADF_RING_SIZE_512 0x03
 #define ADF_RING_SIZE_4K 0x06
 #define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_64K 0x0A
 #define ADF_RING_SIZE_4M 0x10
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index 3aa17ae041..b78158e01d 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -57,6 +57,12 @@ struct icp_qat_fw_comn_req_hdr_cd_pars {
 	} u;
 };
 
+struct lce_key_buff_desc {
+	uint64_t keybuff;
+	uint32_t keybuff_resrvd1;
+	uint32_t keybuff_resrvd2;
+};
+
 struct icp_qat_fw_comn_req_mid {
 	uint64_t opaque_data;
 	uint64_t src_data_addr;
@@ -123,6 +129,12 @@ struct icp_qat_fw_comn_resp {
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
 
+/* GEN_LCE specific Common Header fields */
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
+#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3
+#define ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR 0
+
 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
 	icp_qat_fw_comn_req_hdr_t.service_type
 
@@ -168,6 +180,12 @@ struct icp_qat_fw_comn_resp {
 	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
 	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
 
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
+	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
+	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
+	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))
+
 #define QAT_COMN_PTR_TYPE_BITPOS 0
 #define QAT_COMN_PTR_TYPE_MASK 0x1
 #define QAT_COMN_CD_FLD_TYPE_BITPOS 1
@@ -180,10 +198,20 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_EXT_FLAGS_MASK 0x1
 #define QAT_COMN_EXT_FLAGS_USED 0x1
 
+/* GEN_LCE specific Common Request Flags fields */
+#define QAT_COMN_KEYBUF_USAGE_BITPOS 1
+#define QAT_COMN_KEYBUF_USAGE_MASK 0x1
+#define QAT_COMN_KEY_BUFFER_USED 1
+
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
 	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
 
+#define ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(ptr, keybuf) \
+	((((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \
+	 (((keybuf) & QAT_COMN_PTR_TYPE_MASK) << \
+	   QAT_COMN_KEYBUF_USAGE_BITPOS))
+
 #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
 	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
 
@@ -249,6 +277,8 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
+#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1
 #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
 #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
 
@@ -280,6 +310,10 @@ struct icp_qat_fw_comn_resp {
 	QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
 	QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
 
+#define ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_INVALID_PARAM_BITPOS, \
+	QAT_COMN_RESP_INVALID_PARAM_MASK)
+
 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 215b291b74..eba9f96685 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -22,14 +22,24 @@ enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };
 
+/* In GEN_LCE Command ID 4 corresponds to AEAD */
+#define ICP_QAT_FW_LA_CMD_AEAD 4
+
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 
+/* GEN_LCE Hash, HMAC and GCM Verification Status */
+#define ICP_QAT_FW_LA_VER_STATUS_FAIL ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR
+
+
 struct icp_qat_fw_la_bulk_req {
 	struct icp_qat_fw_comn_req_hdr comn_hdr;
-	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	union {
+		struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+		struct lce_key_buff_desc key_buff;
+	};
 	struct icp_qat_fw_comn_req_mid comn_mid;
 	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
 	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
@@ -81,6 +91,21 @@ struct icp_qat_fw_la_bulk_req {
 #define ICP_QAT_FW_LA_PARTIAL_END 2
 #define QAT_LA_PARTIAL_BITPOS 0
 #define QAT_LA_PARTIAL_MASK 0x3
+
+/* GEN_LCE specific Crypto Flags fields */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
+#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
+#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
+#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
+#define ICP_QAT_FW_SYM_IV_IN_DESC_VALID 1
+#define ICP_QAT_FW_SYM_DIRECTION_BITPOS 15
+#define ICP_QAT_FW_SYM_DIRECTION_MASK 0x1
+
+/* In GEN_LCE AEAD AES GCM Algorithm has ID 0 */
+#define QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE 0
+
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -188,6 +213,23 @@ struct icp_qat_fw_la_bulk_req {
 	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
 	QAT_LA_PARTIAL_MASK)
 
+/* GEN_LCE specific Crypto Flags operations */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS, \
+	ICP_QAT_FW_SYM_AEAD_ALGO_MASK)
+
+#define ICP_QAT_FW_SYM_IV_SIZE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_SIZE_BITPOS, \
+	ICP_QAT_FW_SYM_IV_SIZE_MASK)
+
+#define ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS, \
+	ICP_QAT_FW_SYM_IV_IN_DESC_MASK)
+
+#define ICP_QAT_FW_SYM_DIR_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_DIRECTION_BITPOS, \
+	ICP_QAT_FW_SYM_DIRECTION_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
@@ -424,4 +466,5 @@ struct icp_qat_fw_la_cipher_30_req_params {
 
 	} u;
 };
+
 #endif
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index f55dc3c6f0..6e23b9e35c 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -62,6 +62,9 @@ static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x4945),
 		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1454),
+		},
 		{.device_id = 0},
 };
 
@@ -199,6 +202,8 @@ pick_gen(const struct rte_pci_device *pci_dev)
 	case 0x4943:
 	case 0x4945:
 		return QAT_GEN4;
+	case 0x1454:
+		return QAT_GEN_LCE;
 	default:
 		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
 		return QAT_N_GENS;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v6 3/4] crypto/qat: update headers for GEN LCE support
  2024-02-28 14:00 ` [PATCH v6 0/4] add QAT GEN LCE device Nishikant Nayak
  2024-02-28 14:00   ` [PATCH v6 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
  2024-02-28 14:00   ` [PATCH v6 2/4] common/qat: update common driver to support " Nishikant Nayak
@ 2024-02-28 14:00   ` Nishikant Nayak
  2024-02-29 16:04     ` [EXT] " Akhil Goyal
  2024-02-28 14:00   ` [PATCH v6 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
  3 siblings, 1 reply; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-28 14:00 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Nishikant Nayak

This patch handles the changes required for updating the common
header fields specific to GEN LCE, and adds/updates the response
processing APIs per GEN LCE requirements.
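
For readers new to these macros: QAT_FIELD_GET(flags, pos, mask) in the
existing common header expands to ((flags) >> (pos)) & (mask), so the
new invalid-parameter accessor simply reads bit 1 of comn_status:

/* Hedged sketch of the new response check; "resp_msg" is a received
 * struct icp_qat_fw_comn_resp, as in qat_sym_process_response_gen_lce().
 */
uint8_t invalid_param = ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
		resp_msg->comn_hdr.comn_status);	/* 0 = OK, 1 = bad args */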

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Added GEN LCE specific API for deque burst.
    - Fixed code formatting.
---
 drivers/crypto/qat/qat_sym.c         | 16 ++++++-
 drivers/crypto/qat/qat_sym.h         | 60 ++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 62 +++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.h | 10 ++++-
 4 files changed, 140 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..439a3fc00b 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -180,7 +180,15 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
+			qat_sym_process_response, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+							uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+			qat_sym_process_response_gen_lce, nb_ops);
 }
 
 int
@@ -200,6 +208,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct rte_cryptodev *cryptodev;
 	struct qat_cryptodev_private *internals;
+	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
 	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
 		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 
@@ -249,7 +258,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
 	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+	if (qat_dev_gen == QAT_GEN_LCE)
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
+	else
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
 	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f2f197d050..3461113c13 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -90,7 +90,7 @@
 /*
  * Maximum number of SGL entries
  */
-#define QAT_SYM_SGL_MAX_NUMBER	16
+#define QAT_SYM_SGL_MAX_NUMBER 16
 
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
@@ -142,6 +142,10 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 #ifdef RTE_QAT_OPENSSL
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -390,6 +394,52 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen_lce(void **op, uint8_t *resp,
+	void *op_cookie __rte_unused,
+	uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+		(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+		(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+		sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+				resp_msg->comn_hdr.comn_status))
+			rx_op->status =	RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+
+	*op = (void *)rx_op;
+
+	/*
+	 * Return 1, as the dequeue op only moves on to the next op
+	 * if one was ready to return to the API.
+	 */
+	return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -455,7 +505,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-	void *op_cookie __rte_unused)
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9f4f6c3d93..8f50b61365 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		session->qat_cmd);
 		return -ENOTSUP;
 	}
+
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		qat_sym_session_init_gen_lce_hdr(session);
+		return 0;
+	}
+
 	qat_sym_session_finalize(session);
 
 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1016,6 +1025,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 			dev->data->dev_private;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
+
+		key_buff->keybuff = session->key_paddr;
+	}
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -1079,9 +1094,15 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	}
 
 	if (session->is_single_pass) {
-		if (qat_sym_cd_cipher_set(session,
+		if (qat_dev_gen != QAT_GEN_LCE) {
+			if (qat_sym_cd_cipher_set(session,
 				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
+				return -EINVAL;
+		} else {
+			session->auth_key_length = aead_xform->key.length;
+			memcpy(session->key_array, aead_xform->key.data,
+							aead_xform->key.length);
+		}
 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1970,6 +1991,43 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
+{
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+	/*
+	 * GEN_LCE defines a separate command ID for AEAD operations, but the
+	 * cryptodev API processes AEAD as a single-pass cipher operation.
+	 * Hence the session algo command ID remains CIPHER, while the request
+	 * header built here carries the GEN_LCE AEAD command ID (mode AEAD).
+	 */
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->hdr_flags =
+	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
+			QAT_COMN_KEY_BUFFER_USED);
+
+	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
+		QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
+	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
+		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+		ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_DECRYPT);
+	} else {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_ENCRYPT);
+	}
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9209e2e8df..958af03405 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,10 +111,16 @@ struct qat_sym_session {
 	enum icp_qat_hw_auth_op auth_op;
 	enum icp_qat_hw_auth_mode auth_mode;
 	void *bpi_ctx;
-	struct qat_sym_cd cd;
+	union {
+		struct qat_sym_cd cd;
+		uint8_t key_array[32];
+	};
 	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
 	uint8_t *cd_cur_ptr;
-	phys_addr_t cd_paddr;
+	union {
+		phys_addr_t cd_paddr;
+		phys_addr_t key_paddr;
+	};
 	phys_addr_t prefix_paddr;
 	struct icp_qat_fw_la_bulk_req fw_req;
 	uint8_t aad_len;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v6 4/4] test/cryptodev: add tests for GCM with AAD
  2024-02-28 14:00 ` [PATCH v6 0/4] add QAT GEN LCE device Nishikant Nayak
                     ` (2 preceding siblings ...)
  2024-02-28 14:00   ` [PATCH v6 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
@ 2024-02-28 14:00   ` Nishikant Nayak
  2024-02-29 15:52     ` [EXT] " Akhil Goyal
  3 siblings, 1 reply; 47+ messages in thread
From: Nishikant Nayak @ 2024-02-28 14:00 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Nishikant Nayak, Akhil Goyal, Fan Zhang

Adding one new unit test for validating the features
added as part of GCM with 64 byte AAD support.
The new test case covers the GCM algo for both
encrypt and decrypt operations.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
---
v2:
    - Removed unused code.
    - Added one new unit test, AAD with GCM for GEN LCE.
---
 app/test/test_cryptodev.c                   | 48 +++++++++++++---
 app/test/test_cryptodev_aead_test_vectors.h | 62 +++++++++++++++++++++
 2 files changed, 103 insertions(+), 7 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 38a65aa88f..edd23731f7 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -12494,6 +12494,18 @@ test_AES_GCM_auth_decryption_test_case_256_7(void)
 	return test_authenticated_decryption(&gcm_test_case_256_7);
 }
 
+static int
+test_AES_GCM_auth_decryption_test_case_256_8(void)
+{
+	return test_authenticated_decryption(&gcm_test_case_256_8);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_8(void)
+{
+	return test_authenticated_encryption(&gcm_test_case_256_8);
+}
+
 static int
 test_AES_GCM_auth_decryption_test_case_aad_1(void)
 {
@@ -12613,10 +12625,16 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
@@ -12719,16 +12737,22 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
-		return TEST_SKIPPED;
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
 
 	/* not supported with CPU crypto and raw data-path APIs*/
 	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO ||
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
+		return TEST_SKIPPED;
 
 	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
 			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
@@ -15749,10 +15773,16 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+		&cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/*
@@ -17392,6 +17422,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_encryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_encryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encryption_test_case_256_8),
 
 		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
@@ -17408,6 +17440,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_decryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_decryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_decryption_test_case_256_8),
 
 		/** AES GCM Authenticated Encryption big aad size */
 		TEST_CASE_ST(ut_setup, ut_teardown,
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 07292620a4..eadf206e4d 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -17,6 +17,16 @@ static uint8_t gcm_aad_text[MAX_AAD_LENGTH] = {
 		0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
 		0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
 
+static uint8_t gcm_aad_64B_text[MAX_AAD_LENGTH] = {
+		0xED, 0x3E, 0xA8, 0x1F, 0x74, 0xE5, 0xD1, 0x96,
+		0xA4, 0xD5, 0x4B, 0x26, 0xBB, 0x20, 0x61, 0x7B,
+		0x3B, 0x9C, 0x2A, 0x69, 0x90, 0xEF, 0xD7, 0x9A,
+		0x94, 0xC2, 0xF5, 0x86, 0xBD, 0x00, 0xF6, 0xEA,
+		0x0B, 0x14, 0x24, 0xF2, 0x08, 0x67, 0x42, 0x3A,
+		0xB5, 0xB8, 0x32, 0x97, 0xB5, 0x99, 0x69, 0x75,
+		0x60, 0x00, 0x8F, 0xF7, 0x6F, 0x16, 0x52, 0x66,
+		0xF1, 0xA9, 0x38, 0xFD, 0xB0, 0x61, 0x60, 0xB5 };
+
 static uint8_t ccm_aad_test_1[8] = {
 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
 };
@@ -1736,6 +1746,58 @@ static const struct aead_test_data gcm_test_case_256_7 = {
 	}
 };
 
+static const struct aead_test_data gcm_test_case_256_8 = {
+	.algo = RTE_CRYPTO_AEAD_AES_GCM,
+	.key = {
+		.data = {
+			0xD8, 0xFD, 0x8F, 0x5A, 0x13, 0x7B, 0x05, 0x2C,
+			0xA4, 0x64, 0x7A, 0xDD, 0x1E, 0x9A, 0x68, 0x33,
+			0x04, 0x70, 0xE8, 0x1E, 0x42, 0x84, 0x64, 0xD2,
+			0x23, 0xA1, 0x6A, 0x0A, 0x05, 0x7B, 0x90, 0xDE},
+		.len = 32
+	},
+	.iv = {
+		.data = {
+			0x8D, 0xDF, 0xB8, 0x7F, 0xD0, 0x79, 0x77, 0x55,
+			0xD5, 0x48, 0x03, 0x05},
+		.len = 12
+	},
+	.aad = {
+		.data = gcm_aad_64B_text,
+		.len = 64
+	},
+	.plaintext = {
+		.data = {
+			0x4D, 0xBC, 0x2C, 0x7F, 0x25, 0x1F, 0x07, 0x25,
+			0x54, 0x8C, 0x43, 0xDB, 0xD8, 0x06, 0x9F, 0xBF,
+			0xCA, 0x60, 0xF4, 0xEF, 0x13, 0x87, 0xE8, 0x2F,
+			0x4D, 0x9D, 0x1D, 0x87, 0x9F, 0x91, 0x79, 0x7E,
+			0x3E, 0x98, 0xA3, 0x63, 0xC6, 0xFE, 0xDB, 0x35,
+			0x96, 0x59, 0xB2, 0x0C, 0x80, 0x96, 0x70, 0x07,
+			0x87, 0x42, 0xAB, 0x4F, 0x31, 0x73, 0xC4, 0xF9,
+			0xB0, 0x1E, 0xF1, 0xBC, 0x7D, 0x45, 0xE5, 0xF3},
+		.len = 64
+	},
+	.ciphertext = {
+		.data = {
+			0x21, 0xFA, 0x59, 0x4F, 0x1F, 0x6B, 0x19, 0xC2,
+			0x68, 0xBC, 0x05, 0x93, 0x4E, 0x48, 0x6C, 0x5B,
+			0x0B, 0x7A, 0x43, 0xB7, 0x60, 0x8E, 0x00, 0xC4,
+			0xAB, 0x14, 0x6B, 0xCC, 0xA1, 0x27, 0x6A, 0xDE,
+			0x8E, 0xB6, 0x98, 0xBB, 0x4F, 0xD0, 0x6F, 0x30,
+			0x0F, 0x04, 0xA8, 0x5B, 0xDC, 0xD8, 0xE8, 0x8A,
+			0x73, 0xD9, 0xB8, 0x60, 0x7C, 0xE4, 0x32, 0x4C,
+			0x3A, 0x0B, 0xC2, 0x82, 0xDA, 0x88, 0x17, 0x69},
+		.len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x3B, 0x80, 0x83, 0x72, 0xE5, 0x1B, 0x94, 0x15,
+			0x75, 0xC8, 0x62, 0xBC, 0xA1, 0x66, 0x91, 0x45},
+		.len = 16
+	}
+};
+
 /** variable AAD AES-GCM-128 Test Vectors */
 static const struct aead_test_data gcm_test_case_aad_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* RE: [PATCH v4 0/4] add QAT GEN LCE device
  2024-02-27  9:54   ` [PATCH v4 0/4] add QAT GEN LCE device Power, Ciara
@ 2024-02-29  9:47     ` Kusztal, ArkadiuszX
  0 siblings, 0 replies; 47+ messages in thread
From: Kusztal, ArkadiuszX @ 2024-02-29  9:47 UTC (permalink / raw)
  To: Power, Ciara, Nayak, Nishikanta, dev; +Cc: Ji, Kai, S Joshi, Rakesh



> -----Original Message-----
> From: Power, Ciara <ciara.power@intel.com>
> Sent: Tuesday, February 27, 2024 10:55 AM
> To: Nayak, Nishikanta <nishikanta.nayak@intel.com>; dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>; Kusztal, ArkadiuszX
> <arkadiuszx.kusztal@intel.com>; S Joshi, Rakesh <rakesh.s.joshi@intel.com>
> Subject: RE: [PATCH v4 0/4] add QAT GEN LCE device
> 
> 
> 
> > -----Original Message-----
> > From: Nayak, Nishikanta <nishikanta.nayak@intel.com>
> > Sent: Tuesday, February 27, 2024 9:40 AM
> > To: dev@dpdk.org
> > Cc: Power, Ciara <ciara.power@intel.com>; Ji, Kai <kai.ji@intel.com>;
> > Kusztal, ArkadiuszX <arkadiuszx.kusztal@intel.com>; S Joshi, Rakesh
> > <rakesh.s.joshi@intel.com>; Nayak, Nishikanta
> > <nishikanta.nayak@intel.com>
> > Subject: [PATCH v4 0/4] add QAT GEN LCE device
> >
> > This patchset adds a new QAT LCE device.
> > The device currently only supports symmetric crypto, and only the
> > AES-GCM algorithm.
> >
> > v4:
> >   - Fixed cover letter, v3 included the wrong details relating
> >     to another patchset.
> > v3:
> >   - Fixed typos in commit and code comments.
> >   - Replaced use of linux/kernel.h macro with local macro
> >     to fix ARM compilation in CI.
> > v2:
> >    - Renamed device from GEN 5 to GEN LCE.
> >    - Removed unused code.
> >    - Updated macro names.
> >
> > Nishikant Nayak (4):
> >   common/qat: add files specific to GEN LCE
> >   common/qat: update common driver to support GEN LCE
> >   crypto/qat: update headers for GEN LCE support
> >   test/cryptodev: add tests for GCM with AAD
> 
> Series-acked-by: Ciara Power <ciara.power@intel.com>
Series-acked-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>

^ permalink raw reply	[flat|nested] 47+ messages in thread

* RE: [EXT] [PATCH v6 4/4] test/cryptodev: add tests for GCM with AAD
  2024-02-28 14:00   ` [PATCH v6 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
@ 2024-02-29 15:52     ` Akhil Goyal
  2024-02-29 16:32       ` Power, Ciara
  0 siblings, 1 reply; 47+ messages in thread
From: Akhil Goyal @ 2024-02-29 15:52 UTC (permalink / raw)
  To: Nishikant Nayak, dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, Fan Zhang

> Adding one new unit test for validating the features
> added as part of GCM with 64 byte AAD support.
> The new test case covers the GCM algo for both
> encrypt and decrypt operations.
> 
> Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
> Acked-by: Ciara Power <ciara.power@intel.com>
> ---
What is the need for this new test vector? How is this case not covered by the existing cases?
Can you explain in the patch description?
How is it different from the gcm_test_case_aad_2 case and other GCM 128 cases?


> @@ -12719,16 +12737,22 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
> 
>  	/* Verify the capabilities */
>  	struct rte_cryptodev_sym_capability_idx cap_idx;
> +	const struct rte_cryptodev_symmetric_capability *capability;
>  	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
>  	cap_idx.algo.aead = tdata->algo;
> -	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
> -			&cap_idx) == NULL)
> -		return TEST_SKIPPED;
> +	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
> +		&cap_idx);
> 
> 
>  	/* not supported with CPU crypto and raw data-path APIs*/
>  	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO ||
>  			global_api_test_type == CRYPTODEV_RAW_API_TEST)
>  		return TEST_SKIPPED;
> +	if (capability == NULL)
> +		return TEST_SKIPPED;

You should check the capability just after it is retrieved.
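
For example, a minimal sketch of the suggested ordering (reusing the
names from the hunk above):

	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
			&cap_idx);
	if (capability == NULL)
		return TEST_SKIPPED;

	/* not supported with CPU crypto and raw data-path APIs */
	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO ||
			global_api_test_type == CRYPTODEV_RAW_API_TEST)
		return TEST_SKIPPED;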
 

^ permalink raw reply	[flat|nested] 47+ messages in thread

* RE: [EXT] [PATCH v6 3/4] crypto/qat: update headers for GEN LCE support
  2024-02-28 14:00   ` [PATCH v6 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
@ 2024-02-29 16:04     ` Akhil Goyal
  0 siblings, 0 replies; 47+ messages in thread
From: Akhil Goyal @ 2024-02-29 16:04 UTC (permalink / raw)
  To: Nishikant Nayak, dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi

> This patch handles the changes required for updating the common
> header fields specific to GEN LCE. It also adds/updates the response
> processing APIs based on GEN LCE requirements.
> 
> Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
> Acked-by: Ciara Power <ciara.power@intel.com>
> ---
> v2:
>     - Renamed device from GEN 5 to GEN LCE.
>     - Removed unused code.
>     - Updated macro names.
>     - Added GEN LCE specific API for deque burst.
>     - Fixed code formatting.
> ---
>  drivers/crypto/qat/qat_sym.c         | 16 ++++++-
>  drivers/crypto/qat/qat_sym.h         | 60 ++++++++++++++++++++++++++-
>  drivers/crypto/qat/qat_sym_session.c | 62 +++++++++++++++++++++++++++-
>  drivers/crypto/qat/qat_sym_session.h | 10 ++++-
>  4 files changed, 140 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
> index 6e03bde841..439a3fc00b 100644
> --- a/drivers/crypto/qat/qat_sym.c
> +++ b/drivers/crypto/qat/qat_sym.c
> @@ -180,7 +180,15 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
>  		uint16_t nb_ops)
>  {
>  	return qat_dequeue_op_burst(qp, (void **)ops,
> -				qat_sym_process_response, nb_ops);
> +			qat_sym_process_response, nb_ops);

Unnecessary change. Please remove unnecessary changes which should not be part of this patch.

The maximum line length is 100 characters now; you can format the code as per that.
Since QAT has long macros etc., it would be better to leverage the 100 characters per line.
The code would look more readable.
This is a general comment on the complete patchset.
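
For example, the wrapped call in the hunk above would fit on a single
line within that limit (illustration only):

	return qat_dequeue_op_burst(qp, (void **)ops, qat_sym_process_response, nb_ops);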

> +}
> +
> +uint16_t
> +qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
> +							uint16_t nb_ops)
> +{
> +	return qat_dequeue_op_burst(qp, (void **)ops,
> +			qat_sym_process_response_gen_lce, nb_ops);
>  }
> 
>  int
> @@ -200,6 +208,7 @@ qat_sym_dev_create(struct qat_pci_device
> *qat_pci_dev,
>  	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
>  	struct rte_cryptodev *cryptodev;
>  	struct qat_cryptodev_private *internals;
> +	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
>  	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
>  		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
> 
> @@ -249,7 +258,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
>  	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
> 
>  	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
> -	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
> +	if (qat_dev_gen == QAT_GEN_LCE)
> +		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
> +	else
> +		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
> 
>  	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
> 
> diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
> index f2f197d050..3461113c13 100644
> --- a/drivers/crypto/qat/qat_sym.h
> +++ b/drivers/crypto/qat/qat_sym.h
> @@ -90,7 +90,7 @@
>  /*
>   * Maximum number of SGL entries
>   */
> -#define QAT_SYM_SGL_MAX_NUMBER	16
> +#define QAT_SYM_SGL_MAX_NUMBER 16

Again unnecessary change.

> 
>  /* Maximum data length for single pass GMAC: 2^14-1 */
>  #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
> @@ -142,6 +142,10 @@ uint16_t
>  qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
>  		uint16_t nb_ops);
> 
> +uint16_t
> +qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
> +		uint16_t nb_ops);
> +
>  #ifdef RTE_QAT_OPENSSL
>  /** Encrypt a single partial block
>   *  Depends on openssl libcrypto
> @@ -390,6 +394,52 @@ qat_sym_process_response(void **op, uint8_t *resp,
> void *op_cookie,
>  	return 1;
>  }
> 
> +static __rte_always_inline int
> +qat_sym_process_response_gen_lce(void **op, uint8_t *resp,
> +	void *op_cookie __rte_unused,
> +	uint64_t *dequeue_err_count __rte_unused)
> +{
> +	struct icp_qat_fw_comn_resp *resp_msg =
> +		(struct icp_qat_fw_comn_resp *)resp;
> +	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
> +		(resp_msg->opaque_data);
> +	struct qat_sym_session *sess;
> +
> +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
> +	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
> +		sizeof(struct icp_qat_fw_comn_resp));
> +#endif
> +
> +	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
> +
> +	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> +
> +	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
> +		ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
> +			resp_msg->comn_hdr.comn_status))
> +		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
> +
> +	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
> +		ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
> +			resp_msg->comn_hdr.comn_status))
> +		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> +
> +	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
> +		if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
> +			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
> +				resp_msg->comn_hdr.comn_status))
> +			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
> +	}
> +
> +	*op = (void *)rx_op;
> +
> +	/*
> +	 * return 1 as dequeue op only move on to the next op
> +	 * if one was ready to return to API
> +	 */
> +	return 1;
> +}
> +
>  int
>  qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
>  	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
> @@ -455,7 +505,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
> 
>  static inline void
>  qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
> -	void *op_cookie __rte_unused)
> +	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
> +{
> +}
> +
> +static inline void
> +qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
> +	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
>  {
>  }
> 
> diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
> index 9f4f6c3d93..8f50b61365 100644
> --- a/drivers/crypto/qat/qat_sym_session.c
> +++ b/drivers/crypto/qat/qat_sym_session.c
> @@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
>  static void
>  qat_sym_session_init_common_hdr(struct qat_sym_session *session);
> 
> +static void
> +qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
> +
>  /* Req/cd init functions */
> 
>  static void
> @@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
>  		session->qat_cmd);
>  		return -ENOTSUP;
>  	}
> +
> +	if (qat_dev_gen == QAT_GEN_LCE) {
> +		qat_sym_session_init_gen_lce_hdr(session);
> +		return 0;
> +	}
> +
>  	qat_sym_session_finalize(session);
> 
>  	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
> @@ -1016,6 +1025,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
>  			dev->data->dev_private;
>  	enum qat_device_gen qat_dev_gen =
>  			internals->qat_dev->qat_dev_gen;
> +	if (qat_dev_gen == QAT_GEN_LCE) {
> +		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
> +		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
> +
> +		key_buff->keybuff = session->key_paddr;
> +	}
> 
>  	/*
>  	 * Store AEAD IV parameters as cipher IV,
> @@ -1079,9 +1094,15 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
>  	}
> 
>  	if (session->is_single_pass) {
> -		if (qat_sym_cd_cipher_set(session,
> +		if (qat_dev_gen != QAT_GEN_LCE) {
> +			if (qat_sym_cd_cipher_set(session,
>  				aead_xform->key.data, aead_xform->key.length))
> -			return -EINVAL;
> +				return -EINVAL;
> +		} else {
> +			session->auth_key_length = aead_xform->key.length;
> +			memcpy(session->key_array, aead_xform->key.data,
> +							aead_xform->key.length);
> +		}
>  	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
>  			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
>  			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
> @@ -1970,6 +1991,43 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
>  					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
>  }
> 
> +static void
> +qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
> +{
> +	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
> +	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
> +
> +	/*
> +	 * GEN_LCE specifies a separate command id for AEAD operations, but the
> +	 * Cryptodev API processes AEAD operations as single-pass crypto operations.
> +	 * Hence, even for GEN_LCE, the session algo command ID is CIPHER.
> +	 * Note, however, that the session algo mode is AEAD.
> +	 */
> +	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
> +	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
> +	header->hdr_flags =
> +	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
> +			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
> +	header->comn_req_flags =
> +		ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
> +			QAT_COMN_KEY_BUFFER_USED);
> +
> +	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
> +		QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
> +	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
> +		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
> +	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
> +		ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
> +
> +	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
> +		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
> +			ICP_QAT_HW_CIPHER_DECRYPT);
> +	} else {
> +		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
> +			ICP_QAT_HW_CIPHER_ENCRYPT);
> +	}
> +}
> +
>  int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
>  						const uint8_t *cipherkey,
>  						uint32_t cipherkeylen)
> diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
> index 9209e2e8df..958af03405 100644
> --- a/drivers/crypto/qat/qat_sym_session.h
> +++ b/drivers/crypto/qat/qat_sym_session.h
> @@ -111,10 +111,16 @@ struct qat_sym_session {
>  	enum icp_qat_hw_auth_op auth_op;
>  	enum icp_qat_hw_auth_mode auth_mode;
>  	void *bpi_ctx;
> -	struct qat_sym_cd cd;
> +	union {
> +		struct qat_sym_cd cd;
> +		uint8_t key_array[32];
> +	};
>  	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
>  	uint8_t *cd_cur_ptr;
> -	phys_addr_t cd_paddr;
> +	union {
> +		phys_addr_t cd_paddr;
> +		phys_addr_t key_paddr;
> +	};
>  	phys_addr_t prefix_paddr;
>  	struct icp_qat_fw_la_bulk_req fw_req;
>  	uint8_t aad_len;
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* RE: [EXT] [PATCH v6 1/4] common/qat: add files specific to GEN LCE
  2024-02-28 14:00   ` [PATCH v6 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
@ 2024-02-29 16:09     ` Akhil Goyal
  2024-02-29 16:14     ` Akhil Goyal
  1 sibling, 0 replies; 47+ messages in thread
From: Akhil Goyal @ 2024-02-29 16:09 UTC (permalink / raw)
  To: Nishikant Nayak, dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Thomas Monjalon, Anatoly Burakov

> diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
> new file mode 100644
> index 0000000000..c9df8f5dd2
> --- /dev/null
> +++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
> @@ -0,0 +1,51 @@
> +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
> + * Copyright(c) 2021 Intel Corporation
> + */

I believe the copyright year is a typo here.



^ permalink raw reply	[flat|nested] 47+ messages in thread

* RE: [EXT] [PATCH v6 1/4] common/qat: add files specific to GEN LCE
  2024-02-28 14:00   ` [PATCH v6 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
  2024-02-29 16:09     ` [EXT] " Akhil Goyal
@ 2024-02-29 16:14     ` Akhil Goyal
  2024-02-29 16:30       ` Power, Ciara
  1 sibling, 1 reply; 47+ messages in thread
From: Akhil Goyal @ 2024-02-29 16:14 UTC (permalink / raw)
  To: Nishikant Nayak, dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi,
	Thomas Monjalon, Anatoly Burakov

> Adding GEN5 files for handling GEN LCE specific operations.
> These files are inherited from the existing files/APIs,
> with some changes specific to GEN5 requirements.

It is not a good practice to use "adding files specific to ..".
Instead, please explain what operation/feature is added for the new device.


> Also updated the mailmap file.
> 
> Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
> Acked-by: Ciara Power <ciara.power@intel.com>

^ permalink raw reply	[flat|nested] 47+ messages in thread

* RE: [EXT] [PATCH v6 1/4] common/qat: add files specific to GEN LCE
  2024-02-29 16:14     ` Akhil Goyal
@ 2024-02-29 16:30       ` Power, Ciara
  0 siblings, 0 replies; 47+ messages in thread
From: Power, Ciara @ 2024-02-29 16:30 UTC (permalink / raw)
  To: Akhil Goyal, Nayak, Nishikanta, dev
  Cc: Ji, Kai, Kusztal, ArkadiuszX, S Joshi, Rakesh, Thomas Monjalon,
	Burakov, Anatoly



> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Thursday, February 29, 2024 4:14 PM
> To: Nayak, Nishikanta <nishikanta.nayak@intel.com>; dev@dpdk.org
> Cc: Power, Ciara <ciara.power@intel.com>; Ji, Kai <kai.ji@intel.com>; Kusztal,
> ArkadiuszX <arkadiuszx.kusztal@intel.com>; S Joshi, Rakesh
> <rakesh.s.joshi@intel.com>; Thomas Monjalon <thomas@monjalon.net>;
> Burakov, Anatoly <anatoly.burakov@intel.com>
> Subject: RE: [EXT] [PATCH v6 1/4] common/qat: add files specific to GEN LCE
> 
> > Adding GEN5 files for handling GEN LCE specific operations.
> > These files are inherited from the existing files/APIs, with some
> > changes specific to GEN5 requirements.
> 
> It is not a good practice to use "adding files specific to ..".
> Instead, please explain what operation/feature is added for the new device.

Ok, we can squash this with patch #2, where the device ID is supported and the functions are used.
Will update in the next version.

Thanks,
Ciara

> 
> 
> > Also updated the mailmap file.
> >
> > Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
> > Acked-by: Ciara Power <ciara.power@intel.com>

^ permalink raw reply	[flat|nested] 47+ messages in thread

* RE: [EXT] [PATCH v6 4/4] test/cryptodev: add tests for GCM with AAD
  2024-02-29 15:52     ` [EXT] " Akhil Goyal
@ 2024-02-29 16:32       ` Power, Ciara
  0 siblings, 0 replies; 47+ messages in thread
From: Power, Ciara @ 2024-02-29 16:32 UTC (permalink / raw)
  To: Akhil Goyal, Nayak, Nishikanta, dev
  Cc: Ji, Kai, Kusztal, ArkadiuszX, S Joshi, Rakesh, Fan Zhang



> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Thursday, February 29, 2024 3:52 PM
> To: Nayak, Nishikanta <nishikanta.nayak@intel.com>; dev@dpdk.org
> Cc: Power, Ciara <ciara.power@intel.com>; Ji, Kai <kai.ji@intel.com>; Kusztal,
> ArkadiuszX <arkadiuszx.kusztal@intel.com>; S Joshi, Rakesh
> <rakesh.s.joshi@intel.com>; Fan Zhang <fanzhang.oss@gmail.com>
> Subject: RE: [EXT] [PATCH v6 4/4] test/cryptodev: add tests for GCM with AAD
> 
> > Adding one new unit test for validating the features added as
> > part of GCM with 64 byte AAD support.
> > The new test case covers the GCM algo for both encrypt and
> > decrypt operations.
> >
> > Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
> > Acked-by: Ciara Power <ciara.power@intel.com>
> > ---
> What is the need for this new test vector? How is this case not covered by
> the existing cases?
> Can you explain in the patch description?
> How is it different from the gcm_test_case_aad_2 case and other GCM 128 cases?
> 

The difference is that this test vector uses an AAD of size 64 bytes.
So far, the other test vectors have AAD sizes of 0, 8, 12, and 65296.
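
For reference, the only distinguishing part of the new vector (copied
from the patch above) is the 64-byte AAD definition:

	.aad = {
		.data = gcm_aad_64B_text,
		.len = 64
	},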

Thanks,
Ciara


<snip>


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v7 0/3] add QAT GEN LCE device
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
                   ` (8 preceding siblings ...)
  2024-02-28 14:00 ` [PATCH v6 0/4] add QAT GEN LCE device Nishikant Nayak
@ 2024-02-29 18:43 ` Ciara Power
  2024-02-29 18:43   ` [PATCH v7 1/3] common/qat: add support for " Ciara Power
                     ` (2 more replies)
  2024-02-29 19:45 ` [PATCH v8 0/3] add QAT GEN LCE device Ciara Power
  10 siblings, 3 replies; 47+ messages in thread
From: Ciara Power @ 2024-02-29 18:43 UTC (permalink / raw)
  To: dev; +Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, gakhil

This patchset adds a new QAT LCE device.
The device currently only supports symmetric crypto,
and only the AES-GCM algorithm.

v7:
  - Squashed patch 1 and 2.
  - Fixed formatting to leverage 100 char line limit.
  - Removed unnecessary whitespace and indent changes.
  - Fixed copyright year typo on new file.
  - Added second developer to commit message signed-off tags.
v6:
  - Added documentation and release note changes.
  - Removed unused device PCI ID.
v5:
  - Fixed compilation issue by replacing __u8 with uint8_t.
v4:
  - Fixed cover letter, v3 included the wrong details relating
    to another patchset.
v3:
  - Fixed typos in commit and code comments.
  - Replaced use of linux/kernel.h macro with local macro
    to fix ARM compilation in CI.
v2:
   - Renamed device from GEN 5 to GEN LCE.
   - Removed unused code.
   - Updated macro names.

Nishikant Nayak (3):
  common/qat: add support for GEN LCE device
  crypto/qat: update headers for GEN LCE support
  test/cryptodev: add tests for GCM with 64 byte AAD

 .mailmap                                      |   1 +
 app/test/test_cryptodev.c                     |  43 ++-
 app/test/test_cryptodev_aead_test_vectors.h   |  62 ++++
 doc/guides/cryptodevs/qat.rst                 |   1 +
 doc/guides/rel_notes/release_24_03.rst        |   4 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 295 +++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   5 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 310 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.c                  |  14 +-
 drivers/crypto/qat/qat_sym.h                  |  57 +++-
 drivers/crypto/qat/qat_sym_session.c          |  57 +++-
 drivers/crypto/qat/qat_sym_session.h          |  10 +-
 19 files changed, 1040 insertions(+), 15 deletions(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v7 1/3] common/qat: add support for GEN LCE device
  2024-02-29 18:43 ` [PATCH v7 0/3] add QAT GEN LCE device Ciara Power
@ 2024-02-29 18:43   ` Ciara Power
  2024-02-29 18:43   ` [PATCH v7 2/3] crypto/qat: update headers for GEN LCE support Ciara Power
  2024-02-29 18:43   ` [PATCH v7 3/3] test/cryptodev: add tests for GCM with 64 byte AAD Ciara Power
  2 siblings, 0 replies; 47+ messages in thread
From: Ciara Power @ 2024-02-29 18:43 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, gakhil,
	Nishikant Nayak, Thomas Monjalon, Anatoly Burakov

From: Nishikant Nayak <nishikanta.nayak@intel.com>

Support is added for a new QAT device generation, GEN LCE.

This generation works slightly differently to previous
generations such as GEN 4, so many new files, functions and
macros are needed specifically for this generation.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>
Acked-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
v7:
    - Squashed patch 1 + 2 together.
    - Updated commit message.
    - Added a new Signed-off-by tag to cover changes made by
      the second developer in v7.
    - Fixed copyright year for new files.
    - Utilised 100 char line limit.
v6:
    - Removed unused PCI device IDs from the device list.
    - Updated documentation and release note.
v5:
    - Replaced usage of __u8 with uint8_t.
v3:
    - Removed use of linux/kernel.h macro to fix ARM compilation.
    - Fixed typo in commit body and code comment.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Fixed code formatting
---
 .mailmap                                      |   1 +
 doc/guides/cryptodevs/qat.rst                 |   1 +
 doc/guides/rel_notes/release_24_03.rst        |   4 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 295 +++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   5 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 310 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.h                  |   6 +
 14 files changed, 817 insertions(+), 1 deletion(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

diff --git a/.mailmap b/.mailmap
index 58cca13ef6..8008e5a899 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1036,6 +1036,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>
 Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index dc6b95165d..d9adbfc71e 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -26,6 +26,7 @@ poll mode crypto driver support for the following hardware accelerator devices:
 * ``Intel QuickAssist Technology D15xx``
 * ``Intel QuickAssist Technology C4xxx``
 * ``Intel QuickAssist Technology 4xxx``
+* ``Intel QuickAssist Technology apfxx``
 
 
 Features
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 879bb4944c..41dccbb0c1 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -131,6 +131,10 @@ New Features
   * Added support for comparing result between packet fields or value.
   * Added support for accumulating value of field into another one.
 
+* **Updated Intel QuickAssist Technology driver.**
+
+  * Added support for GEN LCE (1454) device, for AES-GCM only.
+
 * **Updated Marvell cnxk crypto driver.**
 
   * Added support for Rx inject in crypto_cn10k.
diff --git a/drivers/common/qat/dev/qat_dev_gen_lce.c b/drivers/common/qat/dev/qat_dev_gen_lce.c
new file mode 100644
index 0000000000..6514321c32
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen_lce.c
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen_lcevf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
+
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX	VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 4)
+#define LCE_DEVICE_NAME_SIZE			64
+#define LCE_DEVICE_MAX_BANKS			2080
+#define LCE_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define LCE_DEVICE_BITMAP_SIZE	LCE_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)
+
+/* QAT GEN_LCE specific macros */
+#define QAT_GEN_LCE_BUNDLE_NUM		LCE_DEVICE_MAX_BANKS
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM	1
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+	uint8_t device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+struct lce_qat_domain {
+	uint32_t nid        :3;
+	uint32_t fid        :7;
+	uint32_t ftype      :2;
+	uint32_t vfid       :13;
+	uint32_t rid        :4;
+	uint32_t vld        :1;
+	uint32_t desc_over  :1;
+	uint32_t pasid_vld  :1;
+	uint32_t pasid      :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id:   20;
+	uint32_t type:      4;
+	uint32_t resv:      8;
+	struct lce_qat_domain dom;
+};
+
+struct qat_dev_gen_lce_extra {
+	struct qat_qp_hw_data
+	    qp_gen_lce_data[QAT_GEN_LCE_BUNDLE_NUM][QAT_GEN4_QPS_PER_BUNDLE_NUM];
+};
+
+static struct qat_pf2vf_dev qat_pf2vf_gen_lce = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static int
+qat_select_valid_queue_gen_lce(struct qat_pci_device *qat_dev, int qp_id,
+			    enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN_LCE_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen_lce(struct qat_pci_device *qat_dev,
+			enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen_lce(qat_dev, qp_id, service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen_lce_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen_lce(struct qat_pci_device *qat_dev,
+			      enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN_LCE_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+static int qat_dev_read_config_gen_lce(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/** Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen_lce_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+static void qat_qp_build_ring_base_gen_lce(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN_LCE(queue->base_phys_addr, queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN_LCEVF(io_addr, queue->hw_bundle_number,
+			queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen_lce(const struct qat_queue *txq,
+			   void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+			(ADF_RING_BUNDLE_SIZE_GEN_LCE * txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen_lce(const struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+			(ADF_RING_BUNDLE_SIZE_GEN_LCE * txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen_lce(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+			q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+			q_rx->hw_queue_number, q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen_lce(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen_lce(struct qat_qp *qp, struct qat_queue *q, uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen_lce(struct qat_pci_device *qat_dev, void *io_addr, struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen_lce(qp);
+	qat_qp_adf_arb_enable_gen_lce(&qp->tx_q, qp->mmap_bar_addr, &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen_lce = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen_lce,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen_lce,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen_lce,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen_lce,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen_lce,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen_lce,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen_lce,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen_lce,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen_lce,
+};
+
+static int
+qat_reset_ring_pairs_gen_lce(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static const struct rte_mem_resource*
+qat_dev_get_transport_bar_gen_lce(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen_lce(struct rte_mem_resource **mem_resource,
+			  struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen_lce(void)
+{
+	return sizeof(struct qat_dev_gen_lce_extra);
+}
+
+static int
+qat_dev_get_slice_map_gen_lce(uint32_t *map __rte_unused,
+	const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen_lce = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen_lce,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen_lce,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen_lce,
+	.qat_dev_read_config = qat_dev_read_config_gen_lce,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen_lce,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen_lce,
+};
+
+RTE_INIT(qat_dev_gen_lce_init)
+{
+	qat_qp_hw_spec[QAT_GEN_LCE] = &qat_qp_hw_spec_gen_lce;
+	qat_dev_hw_spec[QAT_GEN_LCE] = &qat_dev_hw_spec_gen_lce;
+	qat_gen_config[QAT_GEN_LCE].dev_gen = QAT_GEN_LCE;
+	qat_gen_config[QAT_GEN_LCE].pf2vf_dev = &qat_pf2vf_gen_lce;
+}
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 62abcb6fe3..bc7c3e5b85 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -82,6 +82,7 @@ sources += files(
         'dev/qat_dev_gen2.c',
         'dev/qat_dev_gen3.c',
         'dev/qat_dev_gen4.c',
+        'dev/qat_dev_gen_lce.c',
 )
 includes += include_directories(
         'qat_adf',
@@ -108,6 +109,7 @@ if qat_crypto
             'dev/qat_crypto_pmd_gen2.c',
             'dev/qat_crypto_pmd_gen3.c',
             'dev/qat_crypto_pmd_gen4.c',
+            'dev/qat_crypto_pmd_gen_lce.c',
         ]
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 12a7258c60..19bd812419 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -47,6 +47,7 @@
 #define ADF_RING_SIZE_512 0x03
 #define ADF_RING_SIZE_4K 0x06
 #define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_64K 0x0A
 #define ADF_RING_SIZE_4M 0x10
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
new file mode 100644
index 0000000000..eac0d30f49
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+
+#include "adf_transport_access_macros.h"
+
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+#define ADF_RING_BUNDLE_SIZE_GEN_LCE 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN_LCE 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN_LCE 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN_LCE 0x1080
+
+#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+#define WRITE_CSR_RING_BASE_GEN_LCE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);	\
+	ADF_CSR_WR(csr_base_addr,	\
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),		\
+		l_base);						\
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
new file mode 100644
index 0000000000..3c7232de12
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen_lce.h"
+
+#define ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF 0x0
+
+#define WRITE_CSR_RING_BASE_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),	\
+		l_base);	\
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN_LCEVF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index 3aa17ae041..b78158e01d 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -57,6 +57,12 @@ struct icp_qat_fw_comn_req_hdr_cd_pars {
 	} u;
 };
 
+struct lce_key_buff_desc {
+	uint64_t keybuff;
+	uint32_t keybuff_resrvd1;
+	uint32_t keybuff_resrvd2;
+};
+
 struct icp_qat_fw_comn_req_mid {
 	uint64_t opaque_data;
 	uint64_t src_data_addr;
@@ -123,6 +129,12 @@ struct icp_qat_fw_comn_resp {
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
 
+/* GEN_LCE specific Common Header fields */
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
+#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3
+#define ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR 0
+
 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
 	icp_qat_fw_comn_req_hdr_t.service_type
 
@@ -168,6 +180,12 @@ struct icp_qat_fw_comn_resp {
 	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
 	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
 
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
+	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
+	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
+	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))
+
 #define QAT_COMN_PTR_TYPE_BITPOS 0
 #define QAT_COMN_PTR_TYPE_MASK 0x1
 #define QAT_COMN_CD_FLD_TYPE_BITPOS 1
@@ -180,10 +198,20 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_EXT_FLAGS_MASK 0x1
 #define QAT_COMN_EXT_FLAGS_USED 0x1
 
+/* GEN_LCE specific Common Request Flags fields */
+#define QAT_COMN_KEYBUF_USAGE_BITPOS 1
+#define QAT_COMN_KEYBUF_USAGE_MASK 0x1
+#define QAT_COMN_KEY_BUFFER_USED 1
+
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
 	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
 
+#define ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(ptr, keybuf) \
+	((((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \
+	 (((keybuf) & QAT_COMN_PTR_TYPE_MASK) << \
+	   QAT_COMN_KEYBUF_USAGE_BITPOS))
+
 #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
 	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
 
@@ -249,6 +277,8 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
+#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1
 #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
 #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
 
@@ -280,6 +310,10 @@ struct icp_qat_fw_comn_resp {
 	QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
 	QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
 
+#define ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_INVALID_PARAM_BITPOS, \
+	QAT_COMN_RESP_INVALID_PARAM_MASK)
+
 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 70f0effa62..eba9f96685 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -22,14 +22,24 @@ enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };
 
+/* In GEN_LCE Command ID 4 corresponds to AEAD */
+#define ICP_QAT_FW_LA_CMD_AEAD 4
+
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 
+/* GEN_LCE Hash, HMAC and GCM Verification Status */
+#define ICP_QAT_FW_LA_VER_STATUS_FAIL ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR
+
+
 struct icp_qat_fw_la_bulk_req {
 	struct icp_qat_fw_comn_req_hdr comn_hdr;
-	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	union {
+		struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+		struct lce_key_buff_desc key_buff;
+	};
 	struct icp_qat_fw_comn_req_mid comn_mid;
 	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
 	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
@@ -81,6 +91,21 @@ struct icp_qat_fw_la_bulk_req {
 #define ICP_QAT_FW_LA_PARTIAL_END 2
 #define QAT_LA_PARTIAL_BITPOS 0
 #define QAT_LA_PARTIAL_MASK 0x3
+
+/* GEN_LCE specific Crypto Flags fields */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
+#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
+#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
+#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
+#define ICP_QAT_FW_SYM_IV_IN_DESC_VALID 1
+#define ICP_QAT_FW_SYM_DIRECTION_BITPOS 15
+#define ICP_QAT_FW_SYM_DIRECTION_MASK 0x1
+
+/* In GEN_LCE AEAD AES GCM Algorithm has ID 0 */
+#define QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE 0
+
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -188,6 +213,23 @@ struct icp_qat_fw_la_bulk_req {
 	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
 	QAT_LA_PARTIAL_MASK)
 
+/* GEN_LCE specific Crypto Flags operations */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS, \
+	ICP_QAT_FW_SYM_AEAD_ALGO_MASK)
+
+#define ICP_QAT_FW_SYM_IV_SIZE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_SIZE_BITPOS, \
+	ICP_QAT_FW_SYM_IV_SIZE_MASK)
+
+#define ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS, \
+	ICP_QAT_FW_SYM_IV_IN_DESC_MASK)
+
+#define ICP_QAT_FW_SYM_DIR_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_DIRECTION_BITPOS, \
+	ICP_QAT_FW_SYM_DIRECTION_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
@@ -410,4 +452,19 @@ struct icp_qat_fw_la_cipher_20_req_params {
 	uint8_t    spc_auth_res_sz;
 };
 
+struct icp_qat_fw_la_cipher_30_req_params {
+	uint32_t   spc_aad_sz;
+	uint8_t    cipher_length;
+	uint8_t    reserved[2];
+	uint8_t    spc_auth_res_sz;
+	union {
+		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint64_t cipher_IV_ptr;
+			uint64_t resrvd1;
+		} s;
+	} u;
+};
+
 #endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index 9411a79301..642e009f28 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -21,6 +21,7 @@ enum qat_device_gen {
 	QAT_GEN2,
 	QAT_GEN3,
 	QAT_GEN4,
+	QAT_GEN_LCE,
 	QAT_N_GENS
 };
 
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index f55dc3c6f0..6e23b9e35c 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -62,6 +62,9 @@ static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x4945),
 		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1454),
+		},
 		{.device_id = 0},
 };
 
@@ -199,6 +202,8 @@ pick_gen(const struct rte_pci_device *pci_dev)
 	case 0x4943:
 	case 0x4945:
 		return QAT_GEN4;
+	case 0x1454:
+		return QAT_GEN_LCE;
 	default:
 		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
 		return QAT_N_GENS;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
new file mode 100644
index 0000000000..7298916f2a
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding entry %d failed, no empty SGL buffer", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+		nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+		void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+					QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+					nr, list->buffers[nr].len,
+					list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+static int
+qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+			"GEN_LCE PMD only supports AES-256 AEAD mode",
+			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B of IV can be embedded directly in the descriptor.
+	 * GCM on GEN LCE supports only a 12B IV.
+	 */
+	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.", iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+			rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset), iv_len);
+
+	/* Always SGL */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags & ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* Knowing the digest is contiguous with the ciphertext helps optimize the SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len) == digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC SGL, up to 3 entries:
+	 * a) AAD
+	 * b) ciphertext
+	 * c) digest (decrypt only, and only when the digest buffer is
+	 *    not adjacent to the ciphertext)
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr, aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
+				cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
+				cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
+					digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST SGL, up to 2 entries:
+	 * a) ciphertext
+	 * b) digest (encrypt only, and only when the digest buffer is
+	 *    not adjacent to the ciphertext)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
+				cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
+				cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
+					digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+			rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data, digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* build request for aead */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		build_request = qat_sym_build_op_aead_gen_lce;
+		ctx->build_request[proc_type] = build_request;
+	}
+	return 0;
+}
+
+static int
+qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
+	const char *capa_memz_name,
+	const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
+	uint32_t i;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name, size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG, "Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	struct rte_cryptodev_capabilities *addr =
+		(struct rte_cryptodev_capabilities *)
+		internals->capa_mz->addr;
+	const struct rte_cryptodev_capabilities *capabilities =
+		qat_sym_crypto_caps_gen_lce;
+	const uint32_t capa_num = size / sizeof(struct rte_cryptodev_capabilities);
+	uint32_t curr_capa = 0;
+
+	for (i = 0; i < capa_num; i++) {
+		memcpy(addr + curr_capa, capabilities + i,
+				sizeof(struct rte_cryptodev_capabilities));
+		curr_capa++;
+	}
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+RTE_INIT(qat_sym_crypto_gen_lce_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = qat_sym_crypto_cap_get_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session = qat_sym_crypto_set_session_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1;
+}
+
+RTE_INIT(qat_asym_crypto_gen_lce_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f976009bf2..f2f197d050 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
 
+/* Digest length for GCM Algo is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for GCM algo is 12 bytes */
+#define GCM_IV_LENGTH_GEN_LCE 12
+
 struct qat_sym_session;
 
 struct qat_sym_sgl {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v7 2/3] crypto/qat: update headers for GEN LCE support
  2024-02-29 18:43 ` [PATCH v7 0/3] add QAT GEN LCE device Ciara Power
  2024-02-29 18:43   ` [PATCH v7 1/3] common/qat: add support for " Ciara Power
@ 2024-02-29 18:43   ` Ciara Power
  2024-02-29 18:43   ` [PATCH v7 3/3] test/cryptodev: add tests for GCM with 64 byte AAD Ciara Power
  2 siblings, 0 replies; 47+ messages in thread
From: Ciara Power @ 2024-02-29 18:43 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, gakhil,
	Nishikant Nayak

From: Nishikant Nayak <nishikanta.nayak@intel.com>

This patch updates the common header fields specific to GEN LCE,
and adds/updates the response processing APIs to meet GEN LCE
requirements.
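
For context, a minimal sketch of how an application might react to the
op statuses this new response path sets. The RTE_CRYPTO_OP_STATUS_*
values are standard cryptodev API; dev_id, qp_id and BURST_SZ are
assumed placeholders, not part of this patch:

    struct rte_crypto_op *deq_ops[BURST_SZ];
    uint16_t i, nb;

    nb = rte_cryptodev_dequeue_burst(dev_id, qp_id, deq_ops, BURST_SZ);
    for (i = 0; i < nb; i++) {
        switch (deq_ops[i]->status) {
        case RTE_CRYPTO_OP_STATUS_SUCCESS:
            break;  /* op completed, GCM tag valid */
        case RTE_CRYPTO_OP_STATUS_AUTH_FAILED:
            break;  /* GCM tag mismatch reported on decrypt */
        case RTE_CRYPTO_OP_STATUS_INVALID_ARGS:
            break;  /* FW flagged invalid request parameters */
        default:
            break;  /* e.g. RTE_CRYPTO_OP_STATUS_NOT_PROCESSED */
        }
    }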

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>
Acked-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
v7:
    - Removed unnecessary whitespace and indent changes.
    - Added signed-off for second developer that worked on v7.
    - Utilised 100 char line limit.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Added GEN LCE specific API for deque burst.
    - Fixed code formatting.
---
 drivers/crypto/qat/qat_sym.c         | 14 ++++++-
 drivers/crypto/qat/qat_sym.h         | 51 ++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 57 ++++++++++++++++++++++++++--
 drivers/crypto/qat/qat_sym_session.h | 10 ++++-
 4 files changed, 124 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..9113dfef56 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -180,7 +180,13 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
+							qat_sym_process_response, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_sym_process_response_gen_lce, nb_ops);
 }
 
 int
@@ -200,6 +206,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct rte_cryptodev *cryptodev;
 	struct qat_cryptodev_private *internals;
+	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
 	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
 		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 
@@ -249,7 +256,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
 	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+	if (qat_dev_gen == QAT_GEN_LCE)
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
+	else
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
 	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f2f197d050..6616064251 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -142,6 +142,9 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops);
+
 #ifdef RTE_QAT_OPENSSL
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -390,6 +393,46 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen_lce(void **op, uint8_t *resp, void *op_cookie __rte_unused,
+		uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg = (struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) (resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+			sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		if (ICP_QAT_FW_LA_VER_STATUS_FAIL == ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+				resp_msg->comn_hdr.comn_status))
+			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+
+	*op = (void *)rx_op;
+
+	/*
+	 * Return 1, as the dequeue loop only moves on to the next op
+	 * if one was ready to return to the API.
+	 */
+	return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -455,7 +498,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-	void *op_cookie __rte_unused)
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9f4f6c3d93..0b7e315573 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		session->qat_cmd);
 		return -ENOTSUP;
 	}
+
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		qat_sym_session_init_gen_lce_hdr(session);
+		return 0;
+	}
+
 	qat_sym_session_finalize(session);
 
 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1016,6 +1025,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 			dev->data->dev_private;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
+
+		key_buff->keybuff = session->key_paddr;
+	}
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -1079,9 +1094,14 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	}
 
 	if (session->is_single_pass) {
-		if (qat_sym_cd_cipher_set(session,
-				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
+		if (qat_dev_gen != QAT_GEN_LCE) {
+			if (qat_sym_cd_cipher_set(session,
+					aead_xform->key.data, aead_xform->key.length))
+				return -EINVAL;
+		} else {
+			session->auth_key_length = aead_xform->key.length;
+			memcpy(session->key_array, aead_xform->key.data, aead_xform->key.length);
+		}
 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1970,6 +1990,37 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
+{
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+	/*
+	 * GEN_LCE defines a separate command ID for AEAD operations, but the
+	 * cryptodev API processes AEAD operations as single-pass crypto
+	 * operations. Hence, even for GEN_LCE the session algo command ID
+	 * remains CIPHER, while the session algo mode is AEAD.
+	 */
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
+	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
+			QAT_COMN_KEY_BUFFER_USED);
+
+	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags, QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
+	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags, ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT)
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags, ICP_QAT_HW_CIPHER_DECRYPT);
+	else
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags, ICP_QAT_HW_CIPHER_ENCRYPT);
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9209e2e8df..958af03405 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,10 +111,16 @@ struct qat_sym_session {
 	enum icp_qat_hw_auth_op auth_op;
 	enum icp_qat_hw_auth_mode auth_mode;
 	void *bpi_ctx;
-	struct qat_sym_cd cd;
+	union {
+		struct qat_sym_cd cd;
+		uint8_t key_array[32];
+	};
 	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
 	uint8_t *cd_cur_ptr;
-	phys_addr_t cd_paddr;
+	union {
+		phys_addr_t cd_paddr;
+		phys_addr_t key_paddr;
+	};
 	phys_addr_t prefix_paddr;
 	struct icp_qat_fw_la_bulk_req fw_req;
 	uint8_t aad_len;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v7 3/3] test/cryptodev: add tests for GCM with 64 byte AAD
  2024-02-29 18:43 ` [PATCH v7 0/3] add QAT GEN LCE device Ciara Power
  2024-02-29 18:43   ` [PATCH v7 1/3] common/qat: add support for " Ciara Power
  2024-02-29 18:43   ` [PATCH v7 2/3] crypto/qat: update headers for GEN LCE support Ciara Power
@ 2024-02-29 18:43   ` Ciara Power
  2 siblings, 0 replies; 47+ messages in thread
From: Ciara Power @ 2024-02-29 18:43 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, gakhil,
	Nishikant Nayak, Fan Zhang

From: Nishikant Nayak <nishikanta.nayak@intel.com>

Add one new unit test validating GCM with a 64 byte AAD, an AAD
length that is not currently covered by the existing test vectors.
The new test vector is exercised in both encrypt and decrypt
operations.
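
For reference, the capability-gating pattern the reworked tests use,
sketched standalone (dev_id and the *_len variables are assumed
placeholders):

    struct rte_cryptodev_sym_capability_idx idx = {
        .type = RTE_CRYPTO_SYM_XFORM_AEAD,
        .algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
    };
    const struct rte_cryptodev_symmetric_capability *cap;

    cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
    if (cap == NULL || rte_cryptodev_sym_capability_check_aead(cap,
            key_len, tag_len, aad_len, iv_len) != 0)
        return TEST_SKIPPED; /* device cannot handle these sizes */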

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>
Acked-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
v7:
    - Added second developer to signed-off for v7 fixes.
    - Utilised 100 char line limit.
    - Moved NULL capability check earlier in test function.
v2:
    - Removed unused code.
    - Added one new unit test, AAD with GCM for GEN LCE.
---
 app/test/test_cryptodev.c                   | 43 ++++++++++++--
 app/test/test_cryptodev_aead_test_vectors.h | 62 +++++++++++++++++++++
 2 files changed, 99 insertions(+), 6 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index c3c3f587b4..c61ae9cfcc 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -12551,6 +12551,18 @@ test_AES_GCM_auth_decryption_test_case_256_7(void)
 	return test_authenticated_decryption(&gcm_test_case_256_7);
 }
 
+static int
+test_AES_GCM_auth_decryption_test_case_256_8(void)
+{
+	return test_authenticated_decryption(&gcm_test_case_256_8);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_8(void)
+{
+	return test_authenticated_encryption(&gcm_test_case_256_8);
+}
+
 static int
 test_AES_GCM_auth_decryption_test_case_aad_1(void)
 {
@@ -12670,10 +12682,15 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], &cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
@@ -12776,10 +12793,16 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], &cap_idx);
+
+	if (capability == NULL)
+		return TEST_SKIPPED;
+
+	if (rte_cryptodev_sym_capability_check_aead(capability, tdata->key.len,
+			tdata->auth_tag.len, tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/* not supported with CPU crypto and raw data-path APIs*/
@@ -15806,10 +15829,14 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], &cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(capability, tdata->key.len,
+			tdata->auth_tag.len, tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/*
@@ -17449,6 +17476,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_encryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_encryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encryption_test_case_256_8),
 
 		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
@@ -17465,6 +17494,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_decryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_decryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_decryption_test_case_256_8),
 
 		/** AES GCM Authenticated Encryption big aad size */
 		TEST_CASE_ST(ut_setup, ut_teardown,
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 9213e496db..35ae64c194 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -18,6 +18,16 @@ static uint8_t gcm_aad_text[MAX_AAD_LENGTH] = {
 		0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
 		0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
 
+static uint8_t gcm_aad_64B_text[MAX_AAD_LENGTH] = {
+		0xED, 0x3E, 0xA8, 0x1F, 0x74, 0xE5, 0xD1, 0x96,
+		0xA4, 0xD5, 0x4B, 0x26, 0xBB, 0x20, 0x61, 0x7B,
+		0x3B, 0x9C, 0x2A, 0x69, 0x90, 0xEF, 0xD7, 0x9A,
+		0x94, 0xC2, 0xF5, 0x86, 0xBD, 0x00, 0xF6, 0xEA,
+		0x0B, 0x14, 0x24, 0xF2, 0x08, 0x67, 0x42, 0x3A,
+		0xB5, 0xB8, 0x32, 0x97, 0xB5, 0x99, 0x69, 0x75,
+		0x60, 0x00, 0x8F, 0xF7, 0x6F, 0x16, 0x52, 0x66,
+		0xF1, 0xA9, 0x38, 0xFD, 0xB0, 0x61, 0x60, 0xB5 };
+
 static uint8_t ccm_aad_test_1[8] = {
 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
 };
@@ -1737,6 +1747,58 @@ static const struct aead_test_data gcm_test_case_256_7 = {
 	}
 };
 
+static const struct aead_test_data gcm_test_case_256_8 = {
+	.algo = RTE_CRYPTO_AEAD_AES_GCM,
+	.key = {
+		.data = {
+			0xD8, 0xFD, 0x8F, 0x5A, 0x13, 0x7B, 0x05, 0x2C,
+			0xA4, 0x64, 0x7A, 0xDD, 0x1E, 0x9A, 0x68, 0x33,
+			0x04, 0x70, 0xE8, 0x1E, 0x42, 0x84, 0x64, 0xD2,
+			0x23, 0xA1, 0x6A, 0x0A, 0x05, 0x7B, 0x90, 0xDE},
+		.len = 32
+	},
+	.iv = {
+		.data = {
+			0x8D, 0xDF, 0xB8, 0x7F, 0xD0, 0x79, 0x77, 0x55,
+			0xD5, 0x48, 0x03, 0x05},
+		.len = 12
+	},
+	.aad = {
+		.data = gcm_aad_64B_text,
+		.len = 64
+	},
+	.plaintext = {
+		.data = {
+			0x4D, 0xBC, 0x2C, 0x7F, 0x25, 0x1F, 0x07, 0x25,
+			0x54, 0x8C, 0x43, 0xDB, 0xD8, 0x06, 0x9F, 0xBF,
+			0xCA, 0x60, 0xF4, 0xEF, 0x13, 0x87, 0xE8, 0x2F,
+			0x4D, 0x9D, 0x1D, 0x87, 0x9F, 0x91, 0x79, 0x7E,
+			0x3E, 0x98, 0xA3, 0x63, 0xC6, 0xFE, 0xDB, 0x35,
+			0x96, 0x59, 0xB2, 0x0C, 0x80, 0x96, 0x70, 0x07,
+			0x87, 0x42, 0xAB, 0x4F, 0x31, 0x73, 0xC4, 0xF9,
+			0xB0, 0x1E, 0xF1, 0xBC, 0x7D, 0x45, 0xE5, 0xF3},
+		.len = 64
+	},
+	.ciphertext = {
+		.data = {
+			0x21, 0xFA, 0x59, 0x4F, 0x1F, 0x6B, 0x19, 0xC2,
+			0x68, 0xBC, 0x05, 0x93, 0x4E, 0x48, 0x6C, 0x5B,
+			0x0B, 0x7A, 0x43, 0xB7, 0x60, 0x8E, 0x00, 0xC4,
+			0xAB, 0x14, 0x6B, 0xCC, 0xA1, 0x27, 0x6A, 0xDE,
+			0x8E, 0xB6, 0x98, 0xBB, 0x4F, 0xD0, 0x6F, 0x30,
+			0x0F, 0x04, 0xA8, 0x5B, 0xDC, 0xD8, 0xE8, 0x8A,
+			0x73, 0xD9, 0xB8, 0x60, 0x7C, 0xE4, 0x32, 0x4C,
+			0x3A, 0x0B, 0xC2, 0x82, 0xDA, 0x88, 0x17, 0x69},
+		.len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x3B, 0x80, 0x83, 0x72, 0xE5, 0x1B, 0x94, 0x15,
+			0x75, 0xC8, 0x62, 0xBC, 0xA1, 0x66, 0x91, 0x45},
+		.len = 16
+	}
+};
+
 /** variable AAD AES-GCM-128 Test Vectors */
 static const struct aead_test_data gcm_test_case_aad_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v8 0/3] add QAT GEN LCE device
  2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
                   ` (9 preceding siblings ...)
  2024-02-29 18:43 ` [PATCH v7 0/3] add QAT GEN LCE device Ciara Power
@ 2024-02-29 19:45 ` Ciara Power
  2024-02-29 19:45   ` [PATCH v8 1/3] common/qat: add support for " Ciara Power
                     ` (3 more replies)
  10 siblings, 4 replies; 47+ messages in thread
From: Ciara Power @ 2024-02-29 19:45 UTC (permalink / raw)
  To: dev; +Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, gakhil

This patchset adds a new QAT LCE device.
The device currently only supports symmetric crypto,
and only the AES-GCM algorithm.
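
As a quick illustration of the only configuration the device accepts
(AES-256 key, 12-byte IV, 16-byte tag, per the capability table in
patch 1), here is a hedged session-setup sketch using the standard
cryptodev API; key, aad_len, IV_OFFSET, dev_id and sess_pool are
assumed placeholders:

    struct rte_crypto_sym_xform aead_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_AEAD,
        .aead = {
            .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
            .algo = RTE_CRYPTO_AEAD_AES_GCM,
            .key = { .data = key, .length = 32 },  /* AES-256 only */
            .iv = { .offset = IV_OFFSET, .length = 12 },
            .digest_length = 16,
            .aad_length = aad_len,
        },
    };
    void *sess = rte_cryptodev_sym_session_create(dev_id, &aead_xform,
            sess_pool);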

v8: Rebased on latest next-crypto-for-main.
v7:
  - Squashed patch 1 and 2.
  - Fixed formatting to leverage 100 char line limit.
  - Removed unnecessary whitespace and indent changes.
  - Fixed copyright year typo on new file.
  - Added second developer to commit message signed-off tags.
v6:
  - Added documentation and release note changes.
  - Removed unused device PCI ID.
v5:
  - Fixed compilation issue by replacing __u8 with uint8_t.
v4:
  - Fixed cover letter, v3 included the wrong details relating
    to another patchset.
v3:
  - Fixed typos in commit and code comments.
  - Replaced use of linux/kernel.h macro with local macro
    to fix ARM compilation in CI.
v2:
   - Renamed device from GEN 5 to GEN LCE.
   - Removed unused code.
   - Updated macro names.

Nishikant Nayak (3):
  common/qat: add support for GEN LCE device
  crypto/qat: update headers for GEN LCE support
  test/cryptodev: add tests for GCM with 64 byte AAD

 .mailmap                                      |   1 +
 app/test/test_cryptodev.c                     |  43 ++-
 app/test/test_cryptodev_aead_test_vectors.h   |  62 ++++
 doc/guides/cryptodevs/qat.rst                 |   1 +
 doc/guides/rel_notes/release_24_03.rst        |   1 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 295 +++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   5 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 310 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.c                  |  14 +-
 drivers/crypto/qat/qat_sym.h                  |  57 +++-
 drivers/crypto/qat/qat_sym_session.c          |  57 +++-
 drivers/crypto/qat/qat_sym_session.h          |  10 +-
 19 files changed, 1037 insertions(+), 15 deletions(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 47+ messages in thread

* [PATCH v8 1/3] common/qat: add support for GEN LCE device
  2024-02-29 19:45 ` [PATCH v8 0/3] add QAT GEN LCE device Ciara Power
@ 2024-02-29 19:45   ` Ciara Power
  2024-02-29 19:45   ` [PATCH v8 2/3] crypto/qat: update headers for GEN LCE support Ciara Power
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 47+ messages in thread
From: Ciara Power @ 2024-02-29 19:45 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, gakhil,
	Nishikant Nayak, Thomas Monjalon, Anatoly Burakov

From: Nishikant Nayak <nishikanta.nayak@intel.com>

Support is added for a new QAT device generation, GEN LCE.

This generation works slightly differently from previous
generations such as GEN 4, so many new files, functions, and
macros are needed specifically for it.
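
To make the new files easier to follow: each generation plugs its ops
into per-generation tables at shared-object load time. A minimal
sketch of that registration pattern (the table and struct names match
this patch; stub_rings_per_service is a hypothetical placeholder):

    static int
    stub_rings_per_service(struct qat_pci_device *qat_dev __rte_unused,
            enum qat_service_type service __rte_unused)
    {
        return 1;  /* pretend one ring pair serves this service */
    }

    static struct qat_qp_hw_spec_funcs stub_qp_hw_spec = {
        .qat_qp_rings_per_service = stub_rings_per_service,
    };

    RTE_INIT(stub_gen_lce_init)
    {
        /* Constructor: runs when the driver is loaded */
        qat_qp_hw_spec[QAT_GEN_LCE] = &stub_qp_hw_spec;
    }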

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>
Acked-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
v7:
    - Squashed patch 1 + 2 together.
    - Updated commit message.
    - Added new signed off to cover changes made by
      second developer in v7.
    - Fixed copyright year for new files.
    - Utilised 100 char line limit.
v6:
    - Removed unused PCI device IDs from the device list.
    - Updated documentation and release note.
v5:
    - Replaced usage of __u8 with uint8_t.
v3:
    - Removed use of linux/kernel.h macro to fix ARM compilation.
    - Fixed typo in commit body and code comment.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Fixed code formatting
---
 .mailmap                                      |   1 +
 doc/guides/cryptodevs/qat.rst                 |   1 +
 doc/guides/rel_notes/release_24_03.rst        |   1 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 295 +++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   5 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 310 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.h                  |   6 +
 14 files changed, 814 insertions(+), 1 deletion(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c

diff --git a/.mailmap b/.mailmap
index 58cca13ef6..8008e5a899 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1036,6 +1036,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>
 Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 28945bb5f3..68d792e4cc 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -28,6 +28,7 @@ poll mode crypto driver support for the following hardware accelerator devices:
 * ``Intel QuickAssist Technology 4xxx``
 * ``Intel QuickAssist Technology 300xx``
 * ``Intel QuickAssist Technology 420xx``
+* ``Intel QuickAssist Technology apfxx``
 
 
 Features
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 439d354cd8..dc498a29ce 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -137,6 +137,7 @@ New Features
     devices in QAT crypto driver.
   * Enabled ZUC256 cipher and auth algorithm for wireless slice
     enabled GEN3 and GEN5 devices.
+  * Added support for GEN LCE (1454) device, for AES-GCM only.
 
 * **Updated Marvell cnxk crypto driver.**
 
diff --git a/drivers/common/qat/dev/qat_dev_gen_lce.c b/drivers/common/qat/dev/qat_dev_gen_lce.c
new file mode 100644
index 0000000000..6514321c32
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen_lce.c
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen_lcevf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
+
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX	VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 4)
+#define LCE_DEVICE_NAME_SIZE			64
+#define LCE_DEVICE_MAX_BANKS			2080
+#define LCE_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define LCE_DEVICE_BITMAP_SIZE	LCE_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)
+
+/* QAT GEN_LCE specific macros */
+#define QAT_GEN_LCE_BUNDLE_NUM		LCE_DEVICE_MAX_BANKS
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM	1
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+	uint8_t device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+struct lce_qat_domain {
+	uint32_t nid        :3;
+	uint32_t fid        :7;
+	uint32_t ftype      :2;
+	uint32_t vfid       :13;
+	uint32_t rid        :4;
+	uint32_t vld        :1;
+	uint32_t desc_over  :1;
+	uint32_t pasid_vld  :1;
+	uint32_t pasid      :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id:   20;
+	uint32_t type:      4;
+	uint32_t resv:      8;
+	struct lce_qat_domain dom;
+};
+
+struct qat_dev_gen_lce_extra {
+	struct qat_qp_hw_data
+	    qp_gen_lce_data[QAT_GEN_LCE_BUNDLE_NUM][QAT_GEN4_QPS_PER_BUNDLE_NUM];
+};
+
+static struct qat_pf2vf_dev qat_pf2vf_gen_lce = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static int
+qat_select_valid_queue_gen_lce(struct qat_pci_device *qat_dev, int qp_id,
+			    enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN_LCE_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen_lce(struct qat_pci_device *qat_dev,
+			enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen_lce(qat_dev, qp_id, service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen_lce_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen_lce(struct qat_pci_device *qat_dev,
+			      enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN_LCE_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+static int qat_dev_read_config_gen_lce(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/** Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen_lce_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+static void qat_qp_build_ring_base_gen_lce(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN_LCE(queue->base_phys_addr, queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN_LCEVF(io_addr, queue->hw_bundle_number,
+			queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen_lce(const struct qat_queue *txq,
+			   void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+			(ADF_RING_BUNDLE_SIZE_GEN_LCE * txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen_lce(const struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+			(ADF_RING_BUNDLE_SIZE_GEN_LCE * txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen_lce(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+			q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+			q_rx->hw_queue_number, q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen_lce(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen_lce(struct qat_qp *qp, struct qat_queue *q, uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen_lce(struct qat_pci_device *qat_dev, void *io_addr, struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen_lce(qp);
+	qat_qp_adf_arb_enable_gen_lce(&qp->tx_q, qp->mmap_bar_addr, &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen_lce = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen_lce,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen_lce,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen_lce,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen_lce,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen_lce,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen_lce,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen_lce,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen_lce,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen_lce,
+};
+
+static int
+qat_reset_ring_pairs_gen_lce(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static const struct rte_mem_resource *
+qat_dev_get_transport_bar_gen_lce(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen_lce(struct rte_mem_resource **mem_resource,
+			  struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen_lce(void)
+{
+	return sizeof(struct qat_dev_gen_lce_extra);
+}
+
+static int
+qat_dev_get_slice_map_gen_lce(uint32_t *map __rte_unused,
+	const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen_lce = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen_lce,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen_lce,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen_lce,
+	.qat_dev_read_config = qat_dev_read_config_gen_lce,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen_lce,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen_lce,
+};
+
+RTE_INIT(qat_dev_gen_lce_init)
+{
+	qat_qp_hw_spec[QAT_GEN_LCE] = &qat_qp_hw_spec_gen_lce;
+	qat_dev_hw_spec[QAT_GEN_LCE] = &qat_dev_hw_spec_gen_lce;
+	qat_gen_config[QAT_GEN_LCE].dev_gen = QAT_GEN_LCE;
+	qat_gen_config[QAT_GEN_LCE].pf2vf_dev = &qat_pf2vf_gen_lce;
+}
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index d79085258f..3893b127dd 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -83,6 +83,7 @@ sources += files(
         'dev/qat_dev_gen3.c',
         'dev/qat_dev_gen4.c',
         'dev/qat_dev_gen5.c',
+        'dev/qat_dev_gen_lce.c',
 )
 includes += include_directories(
         'qat_adf',
@@ -111,6 +112,7 @@ if qat_crypto
             'dev/qat_crypto_pmd_gen3.c',
             'dev/qat_crypto_pmd_gen4.c',
             'dev/qat_crypto_pmd_gen5.c',
+            'dev/qat_crypto_pmd_gen_lce.c',
         ]
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 12a7258c60..19bd812419 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -47,6 +47,7 @@
 #define ADF_RING_SIZE_512 0x03
 #define ADF_RING_SIZE_4K 0x06
 #define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_64K 0x0A
 #define ADF_RING_SIZE_4M 0x10
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
new file mode 100644
index 0000000000..eac0d30f49
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+
+#include "adf_transport_access_macros.h"
+
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+#define ADF_RING_BUNDLE_SIZE_GEN_LCE 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN_LCE 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN_LCE 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN_LCE 0x1080
+
+#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+#define WRITE_CSR_RING_BASE_GEN_LCE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);	\
+	ADF_CSR_WR(csr_base_addr,	\
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),		\
+		l_base);						\
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
new file mode 100644
index 0000000000..3c7232de12
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen_lce.h"
+
+#define ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF 0x0
+
+#define WRITE_CSR_RING_BASE_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),	\
+		l_base);	\
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN_LCEVF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index dd7c926140..4c7bbf0f54 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -57,6 +57,12 @@ struct icp_qat_fw_comn_req_hdr_cd_pars {
 	} u;
 };
 
+struct lce_key_buff_desc {
+	uint64_t keybuff;
+	uint32_t keybuff_resrvd1;
+	uint32_t keybuff_resrvd2;
+};
+
 struct icp_qat_fw_comn_req_mid {
 	uint64_t opaque_data;
 	uint64_t src_data_addr;
@@ -124,6 +130,12 @@ struct icp_qat_fw_comn_resp {
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
 
+/* GEN_LCE specific Common Header fields */
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
+#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3
+#define ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR 0
+
 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
 	icp_qat_fw_comn_req_hdr_t.service_type
 
@@ -169,6 +181,12 @@ struct icp_qat_fw_comn_resp {
 	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
 	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
 
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
+	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
+	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
+	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))
+
 #define QAT_COMN_PTR_TYPE_BITPOS 0
 #define QAT_COMN_PTR_TYPE_MASK 0x1
 #define QAT_COMN_CD_FLD_TYPE_BITPOS 1
@@ -178,10 +196,20 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
 #define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
 
+/* GEN_LCE specific Common Request Flags fields */
+#define QAT_COMN_KEYBUF_USAGE_BITPOS 1
+#define QAT_COMN_KEYBUF_USAGE_MASK 0x1
+#define QAT_COMN_KEY_BUFFER_USED 1
+
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
 	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
 
+#define ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(ptr, keybuf) \
+	((((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \
+	 (((keybuf) & QAT_COMN_KEYBUF_USAGE_MASK) << \
+	   QAT_COMN_KEYBUF_USAGE_BITPOS))
+
 #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
 	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
 
@@ -247,6 +275,8 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
+#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1
 #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
 #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
 
@@ -278,6 +308,10 @@ struct icp_qat_fw_comn_resp {
 	QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
 	QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
 
+#define ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_INVALID_PARAM_BITPOS, \
+	QAT_COMN_RESP_INVALID_PARAM_MASK)
+
 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 134c309355..67fc25c919 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -22,14 +22,24 @@ enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };
 
+/* In GEN_LCE Command ID 4 corresponds to AEAD */
+#define ICP_QAT_FW_LA_CMD_AEAD 4
+
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 
+/* GEN_LCE Hash, HMAC and GCM Verification Status */
+#define ICP_QAT_FW_LA_VER_STATUS_FAIL ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR
+
 struct icp_qat_fw_la_bulk_req {
 	struct icp_qat_fw_comn_req_hdr comn_hdr;
-	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	union {
+		struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+		struct lce_key_buff_desc key_buff;
+	};
 	struct icp_qat_fw_comn_req_mid comn_mid;
 	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
 	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
@@ -90,6 +100,21 @@ struct icp_qat_fw_la_bulk_req {
 #define QAT_LA_USE_WAT_SLICE_BITPOS 3
 #define QAT_LA_USE_WAT_SLICE 1
 #define QAT_LA_USE_WAT_SLICE_MASK 0x1
+
+/* GEN_LCE specific Crypto Flags fields */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
+#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
+#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
+#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
+#define ICP_QAT_FW_SYM_IV_IN_DESC_VALID 1
+#define ICP_QAT_FW_SYM_DIRECTION_BITPOS 15
+#define ICP_QAT_FW_SYM_DIRECTION_MASK 0x1
+
+/* In GEN_LCE, the AEAD AES-GCM algorithm has ID 0 */
+#define QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE 0
+
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -212,6 +237,23 @@ struct icp_qat_fw_la_bulk_req {
 	QAT_LA_USE_WAT_SLICE_BITPOS, \
 	QAT_LA_USE_WAT_SLICE_MASK)
 
+/* GEN_LCE specific Crypto Flags operations */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS, \
+	ICP_QAT_FW_SYM_AEAD_ALGO_MASK)
+
+#define ICP_QAT_FW_SYM_IV_SIZE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_SIZE_BITPOS, \
+	ICP_QAT_FW_SYM_IV_SIZE_MASK)
+
+#define ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS, \
+	ICP_QAT_FW_SYM_IV_IN_DESC_MASK)
+
+#define ICP_QAT_FW_SYM_DIR_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_DIRECTION_BITPOS, \
+	ICP_QAT_FW_SYM_DIRECTION_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
@@ -434,4 +476,19 @@ struct icp_qat_fw_la_cipher_20_req_params {
 	uint8_t    spc_auth_res_sz;
 };
 
+struct icp_qat_fw_la_cipher_30_req_params {
+	uint32_t   spc_aad_sz;
+	uint8_t    cipher_length;
+	uint8_t    reserved[2];
+	uint8_t    spc_auth_res_sz;
+	union {
+		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint64_t cipher_IV_ptr;
+			uint64_t resrvd1;
+		} s;
+
+	} u;
+};
+
 #endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index dc48a2e1ee..44a8dff802 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -22,6 +22,7 @@ enum qat_device_gen {
 	QAT_GEN3,
 	QAT_GEN4,
 	QAT_GEN5,
+	QAT_GEN_LCE,
 	QAT_N_GENS
 };
 
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 0ccc3f85fd..a77c628256 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -68,6 +68,9 @@ static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x4947),
 		},
+		{
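+			/* QAT GEN LCE (device ID 0x1454) */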
+			RTE_PCI_DEVICE(0x8086, 0x1454),
+		},
 		{.device_id = 0},
 };
 
@@ -208,6 +211,8 @@ pick_gen(const struct rte_pci_device *pci_dev)
 		return QAT_GEN4;
 	case 0x4947:
 		return QAT_GEN5;
+	case 0x1454:
+		return QAT_GEN_LCE;
 	default:
 		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
 		return QAT_N_GENS;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
new file mode 100644
index 0000000000..7298916f2a
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding %d entry failed, no empty SGL buffer", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+		nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+		void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
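+			/* Trim the final segment so the SGL covers exactly data_len bytes */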
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+					QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+					nr, list->buffers[nr].len,
+					list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+static int
+qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+			"GEN_LCE PMD only supports AES-256 AEAD mode",
+			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
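+	/* Start from the 128B request template prepared at session init */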
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B of IV can be embedded directly in the descriptor.
+	 * GCM supports only a 12B IV for GEN LCE.
+	 */
+	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.", iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+			rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset), iv_len);
+
+	/* Always SGL */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags & ICP_QAT_FW_SYM_COMM_ADDR_SGL) != 0);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* Knowing the digest is contiguous with the cipher text helps optimize the SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len) == digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC-SGL: 3 entries:
+	 * a) AAD
+	 * b) cipher text
+	 * c) digest (decrypt only, and only when the digest is not adjacent)
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr, aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
+				cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
+				cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
+					digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST-SGL: 2 entries:
+	 * a) cipher text
+	 * b) digest (encrypt only, and only when the digest is not adjacent)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
+				cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
+				cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
+					digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+			rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data, digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* build request for aead */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		build_request = qat_sym_build_op_aead_gen_lce;
+		ctx->build_request[proc_type] = build_request;
+	}
+	return 0;
+}
+
+static int
+qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
+	const char *capa_memz_name,
+	const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
+	uint32_t i;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name, size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG, "Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	struct rte_cryptodev_capabilities *addr =
+		(struct rte_cryptodev_capabilities *)
+		internals->capa_mz->addr;
+	const struct rte_cryptodev_capabilities *capabilities =
+		qat_sym_crypto_caps_gen_lce;
+	const uint32_t capa_num = size / sizeof(struct rte_cryptodev_capabilities);
+	uint32_t curr_capa = 0;
+
+	for (i = 0; i < capa_num; i++) {
+		memcpy(addr + curr_capa, capabilities + i,
+				sizeof(struct rte_cryptodev_capabilities));
+		curr_capa++;
+	}
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+RTE_INIT(qat_sym_crypto_gen_lce_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = qat_sym_crypto_cap_get_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session = qat_sym_crypto_set_session_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1;
+}
+
+RTE_INIT(qat_asym_crypto_gen_lce_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f976009bf2..f2f197d050 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
 
+/* Digest length for GCM Algo is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for GCM algo is 12 bytes */
+#define GCM_IV_LENGTH_GEN_LCE 12
+
 struct qat_sym_session;
 
 struct qat_sym_sgl {
-- 
2.25.1



* [PATCH v8 2/3] crypto/qat: update headers for GEN LCE support
  2024-02-29 19:45 ` [PATCH v8 0/3] add QAT GEN LCE device Ciara Power
  2024-02-29 19:45   ` [PATCH v8 1/3] common/qat: add support for " Ciara Power
@ 2024-02-29 19:45   ` Ciara Power
  2024-02-29 19:45   ` [PATCH v8 3/3] test/cryptodev: add tests for GCM with 64 byte AAD Ciara Power
  2024-03-01  6:12   ` [EXTERNAL] [PATCH v8 0/3] add QAT GEN LCE device Akhil Goyal
  3 siblings, 0 replies; 47+ messages in thread
From: Ciara Power @ 2024-02-29 19:45 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, gakhil,
	Nishikant Nayak

From: Nishikant Nayak <nishikanta.nayak@intel.com>

This patch handles the changes required for updating the common
header fields specific to GEN LCE. It also adds and updates the
response-processing APIs based on GEN LCE requirements.
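
As a rough illustration (not part of this patch), the new INVALID_PARAM
status macro is consumed along the same lines as the existing
UNSUPPORTED_REQUEST one; a hypothetical helper mapping a GEN LCE
response status byte to a cryptodev op status could look like:

	static inline int
	lce_status_to_op_status(uint8_t comn_status)
	{
		/* Both macros below are added/used by this series */
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
			ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(comn_status))
			return RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
			ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(comn_status))
			return RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return RTE_CRYPTO_OP_STATUS_SUCCESS;
	}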

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>
Acked-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
v7:
    - Removed unnecessary whitespace and indent changes.
    - Added signed-off for second developer that worked on v7.
    - Utilised 100 char line limit.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Added GEN LCE specific API for dequeue burst.
    - Fixed code formatting.
---
 drivers/crypto/qat/qat_sym.c         | 14 ++++++-
 drivers/crypto/qat/qat_sym.h         | 51 ++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 57 ++++++++++++++++++++++++++--
 drivers/crypto/qat/qat_sym_session.h | 10 ++++-
 4 files changed, 124 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..9113dfef56 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -180,7 +180,13 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
+							qat_sym_process_response, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_sym_process_response_gen_lce, nb_ops);
 }
 
 int
@@ -200,6 +206,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct rte_cryptodev *cryptodev;
 	struct qat_cryptodev_private *internals;
+	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
 	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
 		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 
@@ -249,7 +256,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
 	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+	if (qat_dev_gen == QAT_GEN_LCE)
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
+	else
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
 	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f2f197d050..6616064251 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -142,6 +142,9 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops);
+
 #ifdef RTE_QAT_OPENSSL
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -390,6 +393,46 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen_lce(void **op, uint8_t *resp, void *op_cookie __rte_unused,
+		uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg = (struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) (resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+			sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
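+	/* For decrypt, GEN LCE reports GCM tag-verification failure via the crypto status */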
+	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		if (ICP_QAT_FW_LA_VER_STATUS_FAIL == ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+				resp_msg->comn_hdr.comn_status))
+			rx_op->status =	RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+
+	*op = (void *)rx_op;
+
+	/*
+	 * Return 1, as the dequeue op only moves on to the next op
+	 * if one was ready to return to the API.
+	 */
+	return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -455,7 +498,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-	void *op_cookie __rte_unused)
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 39e4a833ec..e763cfcb51 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -757,6 +760,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		session->qat_cmd);
 		return -ENOTSUP;
 	}
+
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		qat_sym_session_init_gen_lce_hdr(session);
+		return 0;
+	}
+
 	qat_sym_session_finalize(session);
 
 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1103,6 +1112,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 			dev->data->dev_private;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
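+	/* GEN LCE takes the AEAD key via a key buffer descriptor, not a content descriptor */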
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
+
+		key_buff->keybuff = session->key_paddr;
+	}
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -1166,9 +1181,14 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	}
 
 	if (session->is_single_pass) {
-		if (qat_sym_cd_cipher_set(session,
-				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
+		if (qat_dev_gen != QAT_GEN_LCE) {
+			if (qat_sym_cd_cipher_set(session,
+					aead_xform->key.data, aead_xform->key.length))
+				return -EINVAL;
+		} else {
+			session->auth_key_length = aead_xform->key.length;
+			memcpy(session->key_array, aead_xform->key.data, aead_xform->key.length);
+		}
 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -2074,6 +2094,37 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
+{
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+	/*
+	 * GEN_LCE specifies a separate command ID for AEAD operations, but the
+	 * cryptodev API processes AEAD operations as single-pass crypto operations.
+	 * Hence, even for GEN_LCE, the session algo command ID remains CIPHER;
+	 * note, however, that the session algo mode is AEAD.
+	 */
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
+	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
+			QAT_COMN_KEY_BUFFER_USED);
+
+	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags, QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
+	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags, ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags, ICP_QAT_HW_CIPHER_DECRYPT);
+	} else {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags, ICP_QAT_HW_CIPHER_ENCRYPT);
+	}
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 2e25c90342..c41f8cc791 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,10 +111,16 @@ struct qat_sym_session {
 	enum icp_qat_hw_auth_op auth_op;
 	enum icp_qat_hw_auth_mode auth_mode;
 	void *bpi_ctx;
-	struct qat_sym_cd cd;
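+	/* For GEN LCE, the raw AES-256 key is stored where other gens keep the content descriptor */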
+	union {
+		struct qat_sym_cd cd;
+		uint8_t key_array[32];
+	};
 	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
 	uint8_t *cd_cur_ptr;
-	phys_addr_t cd_paddr;
+	union {
+		phys_addr_t cd_paddr;
+		phys_addr_t key_paddr;
+	};
 	phys_addr_t prefix_paddr;
 	struct icp_qat_fw_la_bulk_req fw_req;
 	uint8_t aad_len;
-- 
2.25.1



* [PATCH v8 3/3] test/cryptodev: add tests for GCM with 64 byte AAD
  2024-02-29 19:45 ` [PATCH v8 0/3] add QAT GEN LCE device Ciara Power
  2024-02-29 19:45   ` [PATCH v8 1/3] common/qat: add support for " Ciara Power
  2024-02-29 19:45   ` [PATCH v8 2/3] crypto/qat: update headers for GEN LCE support Ciara Power
@ 2024-02-29 19:45   ` Ciara Power
  2024-03-01  6:12   ` [EXTERNAL] [PATCH v8 0/3] add QAT GEN LCE device Akhil Goyal
  3 siblings, 0 replies; 47+ messages in thread
From: Ciara Power @ 2024-02-29 19:45 UTC (permalink / raw)
  To: dev
  Cc: ciara.power, kai.ji, arkadiuszx.kusztal, rakesh.s.joshi, gakhil,
	Nishikant Nayak, Fan Zhang

From: Nishikant Nayak <nishikanta.nayak@intel.com>

Add one new unit test validating GCM with 64 byte AAD, an AAD length
that is not currently covered by the existing test vectors. The new
test case exercises the GCM algorithm for both encrypt and decrypt
operations.
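
For reference, the new case runs as part of the existing AES-GCM unit
test suite; assuming a QAT GEN LCE VF is bound to vfio-pci (the device
address below is a placeholder), it can be exercised with:

	$ dpdk-test -a <qat_vf_pci_addr> -- -i
	RTE>> cryptodev_qat_autotest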

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>
Acked-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
v7:
    - Added second developer to signed-off for v7 fixes.
    - Utilised 100 char line limit.
    - Moved NULL capability check earlier in test function.
v2:
    - Removed unused code.
    - Added one new unit test, AAD with GCM for GEN LCE.
---
 app/test/test_cryptodev.c                   | 43 ++++++++++++--
 app/test/test_cryptodev_aead_test_vectors.h | 62 +++++++++++++++++++++
 2 files changed, 99 insertions(+), 6 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index c3c3f587b4..c61ae9cfcc 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -12551,6 +12551,18 @@ test_AES_GCM_auth_decryption_test_case_256_7(void)
 	return test_authenticated_decryption(&gcm_test_case_256_7);
 }
 
+static int
+test_AES_GCM_auth_decryption_test_case_256_8(void)
+{
+	return test_authenticated_decryption(&gcm_test_case_256_8);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_8(void)
+{
+	return test_authenticated_encryption(&gcm_test_case_256_8);
+}
+
 static int
 test_AES_GCM_auth_decryption_test_case_aad_1(void)
 {
@@ -12670,10 +12682,15 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], &cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(
+		capability, tdata->key.len, tdata->auth_tag.len,
+		tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
@@ -12776,10 +12793,16 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], &cap_idx);
+
+	if (capability == NULL)
+		return TEST_SKIPPED;
+
+	if (rte_cryptodev_sym_capability_check_aead(capability, tdata->key.len,
+			tdata->auth_tag.len, tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/* not supported with CPU crypto and raw data-path APIs*/
@@ -15806,10 +15829,14 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 	cap_idx.algo.aead = tdata->algo;
-	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
-			&cap_idx) == NULL)
+	capability = rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], &cap_idx);
+	if (capability == NULL)
+		return TEST_SKIPPED;
+	if (rte_cryptodev_sym_capability_check_aead(capability, tdata->key.len,
+			tdata->auth_tag.len, tdata->aad.len, tdata->iv.len))
 		return TEST_SKIPPED;
 
 	/*
@@ -17449,6 +17476,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_encryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_encryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encryption_test_case_256_8),
 
 		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
@@ -17465,6 +17494,8 @@ static struct unit_test_suite cryptodev_aes_gcm_auth_testsuite  = {
 			test_AES_GCM_auth_decryption_test_case_256_6),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_auth_decryption_test_case_256_7),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_decryption_test_case_256_8),
 
 		/** AES GCM Authenticated Encryption big aad size */
 		TEST_CASE_ST(ut_setup, ut_teardown,
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 9213e496db..35ae64c194 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -18,6 +18,16 @@ static uint8_t gcm_aad_text[MAX_AAD_LENGTH] = {
 		0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
 		0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
 
+static uint8_t gcm_aad_64B_text[MAX_AAD_LENGTH] = {
+		0xED, 0x3E, 0xA8, 0x1F, 0x74, 0xE5, 0xD1, 0x96,
+		0xA4, 0xD5, 0x4B, 0x26, 0xBB, 0x20, 0x61, 0x7B,
+		0x3B, 0x9C, 0x2A, 0x69, 0x90, 0xEF, 0xD7, 0x9A,
+		0x94, 0xC2, 0xF5, 0x86, 0xBD, 0x00, 0xF6, 0xEA,
+		0x0B, 0x14, 0x24, 0xF2, 0x08, 0x67, 0x42, 0x3A,
+		0xB5, 0xB8, 0x32, 0x97, 0xB5, 0x99, 0x69, 0x75,
+		0x60, 0x00, 0x8F, 0xF7, 0x6F, 0x16, 0x52, 0x66,
+		0xF1, 0xA9, 0x38, 0xFD, 0xB0, 0x61, 0x60, 0xB5 };
+
 static uint8_t ccm_aad_test_1[8] = {
 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
 };
@@ -1737,6 +1747,58 @@ static const struct aead_test_data gcm_test_case_256_7 = {
 	}
 };
 
+static const struct aead_test_data gcm_test_case_256_8 = {
+	.algo = RTE_CRYPTO_AEAD_AES_GCM,
+	.key = {
+		.data = {
+			0xD8, 0xFD, 0x8F, 0x5A, 0x13, 0x7B, 0x05, 0x2C,
+			0xA4, 0x64, 0x7A, 0xDD, 0x1E, 0x9A, 0x68, 0x33,
+			0x04, 0x70, 0xE8, 0x1E, 0x42, 0x84, 0x64, 0xD2,
+			0x23, 0xA1, 0x6A, 0x0A, 0x05, 0x7B, 0x90, 0xDE},
+		.len = 32
+	},
+	.iv = {
+		.data = {
+			0x8D, 0xDF, 0xB8, 0x7F, 0xD0, 0x79, 0x77, 0x55,
+			0xD5, 0x48, 0x03, 0x05},
+		.len = 12
+	},
+	.aad = {
+		.data = gcm_aad_64B_text,
+		.len = 64
+	},
+	.plaintext = {
+		.data = {
+			0x4D, 0xBC, 0x2C, 0x7F, 0x25, 0x1F, 0x07, 0x25,
+			0x54, 0x8C, 0x43, 0xDB, 0xD8, 0x06, 0x9F, 0xBF,
+			0xCA, 0x60, 0xF4, 0xEF, 0x13, 0x87, 0xE8, 0x2F,
+			0x4D, 0x9D, 0x1D, 0x87, 0x9F, 0x91, 0x79, 0x7E,
+			0x3E, 0x98, 0xA3, 0x63, 0xC6, 0xFE, 0xDB, 0x35,
+			0x96, 0x59, 0xB2, 0x0C, 0x80, 0x96, 0x70, 0x07,
+			0x87, 0x42, 0xAB, 0x4F, 0x31, 0x73, 0xC4, 0xF9,
+			0xB0, 0x1E, 0xF1, 0xBC, 0x7D, 0x45, 0xE5, 0xF3},
+		.len = 64
+	},
+	.ciphertext = {
+		.data = {
+			0x21, 0xFA, 0x59, 0x4F, 0x1F, 0x6B, 0x19, 0xC2,
+			0x68, 0xBC, 0x05, 0x93, 0x4E, 0x48, 0x6C, 0x5B,
+			0x0B, 0x7A, 0x43, 0xB7, 0x60, 0x8E, 0x00, 0xC4,
+			0xAB, 0x14, 0x6B, 0xCC, 0xA1, 0x27, 0x6A, 0xDE,
+			0x8E, 0xB6, 0x98, 0xBB, 0x4F, 0xD0, 0x6F, 0x30,
+			0x0F, 0x04, 0xA8, 0x5B, 0xDC, 0xD8, 0xE8, 0x8A,
+			0x73, 0xD9, 0xB8, 0x60, 0x7C, 0xE4, 0x32, 0x4C,
+			0x3A, 0x0B, 0xC2, 0x82, 0xDA, 0x88, 0x17, 0x69},
+		.len = 64
+	},
+	.auth_tag = {
+		.data = {
+			0x3B, 0x80, 0x83, 0x72, 0xE5, 0x1B, 0x94, 0x15,
+			0x75, 0xC8, 0x62, 0xBC, 0xA1, 0x66, 0x91, 0x45},
+		.len = 16
+	}
+};
+
 /** variable AAD AES-GCM-128 Test Vectors */
 static const struct aead_test_data gcm_test_case_aad_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
-- 
2.25.1



* RE: [EXTERNAL] [PATCH v8 0/3] add QAT GEN LCE device
  2024-02-29 19:45 ` [PATCH v8 0/3] add QAT GEN LCE device Ciara Power
                     ` (2 preceding siblings ...)
  2024-02-29 19:45   ` [PATCH v8 3/3] test/cryptodev: add tests for GCM with 64 byte AAD Ciara Power
@ 2024-03-01  6:12   ` Akhil Goyal
  3 siblings, 0 replies; 47+ messages in thread
From: Akhil Goyal @ 2024-03-01  6:12 UTC (permalink / raw)
  To: Ciara Power, dev; +Cc: kai.ji, arkadiuszx.kusztal, rakesh.s.joshi

> This patchset adds a new QAT LCE device.
> The device currently only supports symmetric crypto,
> and only the AES-GCM algorithm.
> 
> v8: Rebased on latest next-crypto-for-main.
> v7:
>   - Squashed patch 1 and 2.
>   - Fixed formatting to leverage 100 char line limit.
>   - Removed unnecessary whitespace and indent changes.
>   - Fixed copyright year typo on new file.
>   - Added second developer to commit message signed-off tags.
> v6:
>   - Added documentation and release note changes.
>   - Removed unused device PCI ID.
> v5:
>   - Fixed compilation issue by replacing __u8 with uint8_t.
> v4:
>   - Fixed cover letter, v3 included the wrong details relating
>     to another patchset.
> v3:
>   - Fixed typos in commit and code comments.
>   - Replaced use of linux/kernel.h macro with local macro
>     to fix ARM compilation in CI.
> v2:
>    - Renamed device from GEN 5 to GEN LCE.
>    - Removed unused code.
>    - Updated macro names.
> 
> Nishikant Nayak (3):
>   common/qat: add support for GEN LCE device
>   crypto/qat: update headers for GEN LCE support
>   test/cryptodev: add tests for GCM with 64 byte AAD
> 
>  .mailmap                                      |   1 +
>  app/test/test_cryptodev.c                     |  43 ++-
>  app/test/test_cryptodev_aead_test_vectors.h   |  62 ++++
>  doc/guides/cryptodevs/qat.rst                 |   1 +
>  doc/guides/rel_notes/release_24_03.rst        |   1 +
>  drivers/common/qat/dev/qat_dev_gen_lce.c      | 295 +++++++++++++++++
>  drivers/common/qat/meson.build                |   2 +
>  .../qat/qat_adf/adf_transport_access_macros.h |   1 +
>  .../adf_transport_access_macros_gen_lce.h     |  51 +++
>  .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
>  drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
>  drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
>  drivers/common/qat/qat_common.h               |   1 +
>  drivers/common/qat/qat_device.c               |   5 +
>  .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 310 ++++++++++++++++++
>  drivers/crypto/qat/qat_sym.c                  |  14 +-
>  drivers/crypto/qat/qat_sym.h                  |  57 +++-
>  drivers/crypto/qat/qat_sym_session.c          |  57 +++-
>  drivers/crypto/qat/qat_sym_session.h          |  10 +-
>  19 files changed, 1037 insertions(+), 15 deletions(-)
>  create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
>  create mode 100644
> drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
>  create mode 100644
> drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
>  create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
> 
Applied to dpdk-next-crypto
Thanks.


Thread overview: 47+ messages
2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
2023-12-20 13:26 ` [PATCH 2/4] common/qat: update common driver to support GEN5 Nishikant Nayak
2023-12-20 13:26 ` [PATCH 3/4] crypto/qat: update headers for GEN5 support Nishikant Nayak
2023-12-20 13:26 ` [PATCH 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-23 15:17 ` [PATCH 1/4] common/qat: add files specific to GEN5 Power, Ciara
2024-02-26 13:03 ` [PATCH v2 0/4] add QAT GEN LCE device Nishikant Nayak
2024-02-26 13:03   ` [PATCH v2 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
2024-02-26 13:03   ` [PATCH v2 2/4] common/qat: update common driver to support " Nishikant Nayak
2024-02-26 13:03   ` [PATCH v2 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-26 13:03   ` [PATCH v2 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-27  9:35 ` [PATCH v3 0/4] add new QAT gen3 and gen5 Nishikant Nayak
2024-02-27  9:35   ` [PATCH v3 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
2024-02-27  9:35   ` [PATCH v3 2/4] common/qat: update common driver to support " Nishikant Nayak
2024-02-27  9:35   ` [PATCH v3 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-27  9:35   ` [PATCH v3 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-27  9:40 ` [PATCH v4 0/4] add QAT GEN LCE device Nishikant Nayak
2024-02-27  9:40   ` [PATCH v4 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
2024-02-27  9:40   ` [PATCH v4 2/4] common/qat: update common driver to support " Nishikant Nayak
2024-02-27  9:40   ` [PATCH v4 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-27  9:40   ` [PATCH v4 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-27  9:54   ` [PATCH v4 0/4] add QAT GEN LCE device Power, Ciara
2024-02-29  9:47     ` Kusztal, ArkadiuszX
2024-02-27 11:33 ` [PATCH v5 " Nishikant Nayak
2024-02-27 11:33   ` [PATCH v5 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
2024-02-27 11:33   ` [PATCH v5 2/4] common/qat: update common driver to support " Nishikant Nayak
2024-02-27 11:33   ` [PATCH v5 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-27 11:33   ` [PATCH v5 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-28 14:00 ` [PATCH v6 0/4] add QAT GEN LCE device Nishikant Nayak
2024-02-28 14:00   ` [PATCH v6 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
2024-02-29 16:09     ` [EXT] " Akhil Goyal
2024-02-29 16:14     ` Akhil Goyal
2024-02-29 16:30       ` Power, Ciara
2024-02-28 14:00   ` [PATCH v6 2/4] common/qat: update common driver to support " Nishikant Nayak
2024-02-28 14:00   ` [PATCH v6 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-29 16:04     ` [EXT] " Akhil Goyal
2024-02-28 14:00   ` [PATCH v6 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-29 15:52     ` [EXT] " Akhil Goyal
2024-02-29 16:32       ` Power, Ciara
2024-02-29 18:43 ` [PATCH v7 0/3] add QAT GEN LCE device Ciara Power
2024-02-29 18:43   ` [PATCH v7 1/3] common/qat: add support for " Ciara Power
2024-02-29 18:43   ` [PATCH v7 2/3] crypto/qat: update headers for GEN LCE support Ciara Power
2024-02-29 18:43   ` [PATCH v7 3/3] test/cryptodev: add tests for GCM with 64 byte AAD Ciara Power
2024-02-29 19:45 ` [PATCH v8 0/3] add QAT GEN LCE device Ciara Power
2024-02-29 19:45   ` [PATCH v8 1/3] common/qat: add support for " Ciara Power
2024-02-29 19:45   ` [PATCH v8 2/3] crypto/qat: update headers for GEN LCE support Ciara Power
2024-02-29 19:45   ` [PATCH v8 3/3] test/cryptodev: add tests for GCM with 64 byte AAD Ciara Power
2024-03-01  6:12   ` [EXTERNAL] [PATCH v8 0/3] add QAT GEN LCE device Akhil Goyal
