From: Nishikant Nayak <nishikanta.nayak@intel.com>
To: dev@dpdk.org
Cc: ciara.power@intel.com, kai.ji@intel.com,
arkadiuszx.kusztal@intel.com, rakesh.s.joshi@intel.com,
Nishikant Nayak <nishikanta.nayak@intel.com>,
Thomas Monjalon <thomas@monjalon.net>,
Anatoly Burakov <anatoly.burakov@intel.com>
Subject: [PATCH v2 1/4] common/qat: add files specific to GEN LCE
Date: Mon, 26 Feb 2024 13:03:39 +0000 [thread overview]
Message-ID: <20240226130342.4115292-2-nishikanta.nayak@intel.com> (raw)
In-Reply-To: <20240226130342.4115292-1-nishikanta.nayak@intel.com>
Adding GEN LCE files for handling GEN LCE specific operations.
These files are inherited from the existing files/APIs,
with some changes specific to GEN LCE requirements.
Also updated the mailmap file.
Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
---
v2:
- Renamed device from GEN 5 to GEN LCE.
- Removed unused code.
- Updated macro names.
---
---
.mailmap | 1 +
drivers/common/qat/dev/qat_dev_gen_lce.c | 306 ++++++++++++++++
drivers/common/qat/meson.build | 2 +
.../adf_transport_access_macros_gen_lce.h | 51 +++
.../adf_transport_access_macros_gen_lcevf.h | 48 +++
drivers/common/qat/qat_adf/icp_qat_fw_la.h | 14 +
drivers/common/qat/qat_common.h | 1 +
.../crypto/qat/dev/qat_crypto_pmd_gen_lce.c | 329 ++++++++++++++++++
drivers/crypto/qat/qat_sym.h | 6 +
9 files changed, 758 insertions(+)
create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
diff --git a/.mailmap b/.mailmap
index 58cca13ef6..8008e5a899 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1036,6 +1036,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>
Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
Nir Efrati <nir.efrati@intel.com>
Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
Nitin Saxena <nitin.saxena@caviumnetworks.com>
Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/drivers/common/qat/dev/qat_dev_gen_lce.c b/drivers/common/qat/dev/qat_dev_gen_lce.c
new file mode 100644
index 0000000000..4cef0b8be2
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen_lce.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen_lcevf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <linux/kernel.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+/* Width of unsigned long in bits; sizes the bank bitmap below. */
+#define BITS_PER_ULONG (sizeof(unsigned long) * 8)
+
+/* LCE-specific VFIO regions, placed after the standard PCI regions. */
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX (VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX (VFIO_PCI_NUM_REGIONS + 4)
+#define LCE_DEVICE_NAME_SIZE 64
+/* Maximum number of ring banks an LCE device can expose. */
+#define LCE_DEVICE_MAX_BANKS 2080
+#define LCE_DEVICE_BITMAP_SIZE \
+	__KERNEL_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)
+
+/* QAT GEN_LCE specific macros */
+#define QAT_GEN_LCE_BUNDLE_NUM LCE_DEVICE_MAX_BANKS
+/*
+ * NOTE(review): the "GEN4" in this name looks like a leftover from the
+ * file this was derived from; consider QAT_GEN_LCE_QPS_PER_BUNDLE_NUM
+ * for consistency with the rest of this GEN_LCE-only file.
+ */
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM 1
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities,
+ * as reported through the LCE device-config VFIO region.
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+	__u8 device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+/* struct lce_vfio_dev_cy_cap - CY (crypto) capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+/*
+ * Ring-ownership domain descriptor.
+ * NOTE(review): bit-field layout presumably mirrors the LCE hardware
+ * definition - confirm against the device spec.
+ */
+struct lce_qat_domain {
+	uint32_t nid :3;
+	uint32_t fid :7;
+	uint32_t ftype :2;
+	uint32_t vfid :13;
+	uint32_t rid :4;
+	uint32_t vld :1;
+	uint32_t desc_over :1;
+	uint32_t pasid_vld :1;
+	uint32_t pasid :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id: 20;
+	uint32_t type: 4;
+	uint32_t resv: 8;
+	struct lce_qat_domain dom;
+};
+
+/* Per-device private data: per-bundle queue-pair configuration. */
+struct qat_dev_gen_lce_extra {
+	struct qat_qp_hw_data
+	    qp_gen_lce_data[QAT_GEN_LCE_BUNDLE_NUM][QAT_GEN4_QPS_PER_BUNDLE_NUM];
+};
+
+/* PF-to-VF message register layout, reused from the 4xxx IOV definitions. */
+static struct qat_pf2vf_dev qat_pf2vf_gen_lce = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+/*
+ * Map a logical qp_id for a given service to a physical bundle index by
+ * scanning the per-bundle configuration. Returns the index of the
+ * (qp_id + 1)-th bundle configured for service_type, or -1 if there are
+ * not enough such bundles.
+ */
+static int
+qat_select_valid_queue_gen_lce(struct qat_pci_device *qat_dev, int qp_id,
+		enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN_LCE_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen_lce_data[i][0].service_type ==
+				service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Return the hardware config of the ring pair backing (service_type, qp_id),
+ * or NULL when no matching bundle exists.
+ */
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen_lce(struct qat_pci_device *qat_dev,
+		enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen_lce(qat_dev, qp_id,
+			service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen_lce_data[ring_pair][0];
+}
+
+/* Count bundles whose first ring pair is configured for 'service'. */
+static int
+qat_qp_rings_per_service_gen_lce(struct qat_pci_device *qat_dev,
+		enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN_LCE_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+/*
+ * Static device configuration: expose a single symmetric-crypto ring pair
+ * (ring 0 = requests, ring 1 = responses) on bundle 0.
+ */
+static int qat_dev_read_config_gen_lce(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/* Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen_lce_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	/* 128B request / 32B response descriptors. */
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+/* Program the ring base-address CSR pair for one queue. */
+static void qat_qp_build_ring_base_gen_lce(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN_LCE(queue->base_phys_addr,
+			queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN_LCEVF(io_addr, queue->hw_bundle_number,
+			queue->hw_queue_number, queue_base);
+}
+
+/*
+ * Set bit 0 of the ring-service-arbiter enable CSR for the tx queue's
+ * bundle, serialized by the per-device arbiter lock.
+ */
+static void
+qat_qp_adf_arb_enable_gen_lce(const struct qat_queue *txq,
+		void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+			(ADF_RING_BUNDLE_SIZE_GEN_LCE *
+			txq->hw_bundle_number);
+	/*
+	 * NOTE(review): the read applies ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF
+	 * but the write-back below does not. The offset is 0x0 today, so this
+	 * is harmless, yet the asymmetry looks unintended - confirm.
+	 */
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+/*
+ * Clear bit 0 of the ring-service-arbiter enable CSR for the tx queue's
+ * bundle, serialized by the per-device arbiter lock.
+ */
+static void
+qat_qp_adf_arb_disable_gen_lce(const struct qat_queue *txq,
+		void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_RING_BUNDLE_SIZE_GEN_LCE *
+			txq->hw_bundle_number);
+	/* NOTE(review): same read/write offset asymmetry as the enable path;
+	 * benign while the VF offset is 0x0 - confirm.
+	 */
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,
+			arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+/* Program tx/rx ring-size config and rx near-watermarks for a queue pair. */
+static void
+qat_qp_adf_configure_queues_gen_lce(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+			ADF_RING_NEAR_WATERMARK_512,
+			ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+			q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+			q_rx->hw_queue_number, q_resp_config);
+}
+
+/* Ring the request (tx) tail doorbell for queue q. */
+static void
+qat_qp_csr_write_tail_gen_lce(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+			q->hw_queue_number, q->tail);
+}
+
+/* Acknowledge consumed responses by advancing the rx head CSR. */
+static void
+qat_qp_csr_write_head_gen_lce(struct qat_qp *qp, struct qat_queue *q,
+		uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+			q->hw_queue_number, new_head);
+}
+
+/* Full CSR bring-up for a queue pair: ring bases, config, arbitration. */
+static void
+qat_qp_csr_setup_gen_lce(struct qat_pci_device *qat_dev, void *io_addr,
+		struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen_lce(qp);
+	qat_qp_adf_arb_enable_gen_lce(&qp->tx_q, qp->mmap_bar_addr,
+			&qat_dev->arb_csr_lock);
+}
+
+/* GEN_LCE queue-pair ops plugged into the common qp layer. */
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen_lce = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen_lce,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen_lce,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen_lce,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen_lce,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen_lce,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen_lce,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen_lce,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen_lce,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen_lce,
+};
+
+/* GEN LCE needs no explicit ring-pair reset; nothing to do. */
+static int
+qat_reset_ring_pairs_gen_lce(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+/* The transport (ring CSR) BAR is mem_resource[0] on GEN LCE. */
+static const struct rte_mem_resource*
+qat_dev_get_transport_bar_gen_lce(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+/* The miscellaneous BAR is mem_resource[2] on GEN LCE. */
+static int
+qat_dev_get_misc_bar_gen_lce(struct rte_mem_resource **mem_resource,
+		struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+/* Size of the per-device private area allocated by the common layer. */
+static int
+qat_dev_get_extra_size_gen_lce(void)
+{
+	return sizeof(struct qat_dev_gen_lce_extra);
+}
+
+/* Slice map is not applicable to GEN LCE; report success with no map. */
+static int
+qat_dev_get_slice_map_gen_lce(uint32_t *map __rte_unused,
+		const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+/* GEN_LCE device-level ops plugged into the common qat device layer. */
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen_lce = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen_lce,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen_lce,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen_lce,
+	.qat_dev_read_config = qat_dev_read_config_gen_lce,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen_lce,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen_lce,
+};
+
+/* Register the GEN_LCE ops tables at shared-object load time. */
+RTE_INIT(qat_dev_gen_lce_init)
+{
+	qat_qp_hw_spec[QAT_GEN_LCE] = &qat_qp_hw_spec_gen_lce;
+	qat_dev_hw_spec[QAT_GEN_LCE] = &qat_dev_hw_spec_gen_lce;
+	qat_gen_config[QAT_GEN_LCE].dev_gen = QAT_GEN_LCE;
+	qat_gen_config[QAT_GEN_LCE].pf2vf_dev = &qat_pf2vf_gen_lce;
+}
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 62abcb6fe3..bc7c3e5b85 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -82,6 +82,7 @@ sources += files(
'dev/qat_dev_gen2.c',
'dev/qat_dev_gen3.c',
'dev/qat_dev_gen4.c',
+ 'dev/qat_dev_gen_lce.c',
)
includes += include_directories(
'qat_adf',
@@ -108,6 +109,7 @@ if qat_crypto
'dev/qat_crypto_pmd_gen2.c',
'dev/qat_crypto_pmd_gen3.c',
'dev/qat_crypto_pmd_gen4.c',
+ 'dev/qat_crypto_pmd_gen_lce.c',
]
sources += files(join_paths(qat_crypto_relpath, f))
endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
new file mode 100644
index 0000000000..c9df8f5dd2
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+
+#include "adf_transport_access_macros.h"
+
+/*
+ * NOTE(review): the three GEN4-suffixed constants below appear to be
+ * leftovers from the GEN4 header this file was derived from and are not
+ * referenced by the GEN_LCE code in this patch - confirm and drop or rename.
+ */
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+/* Per-bank CSR stride and ring config/base register offsets for GEN LCE. */
+#define ADF_RING_BUNDLE_SIZE_GEN_LCE 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN_LCE 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN_LCE 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN_LCE 0x1080
+
+/* Encode a 64B-aligned ring base address together with its size field. */
+#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+/*
+ * Split the 64-bit ring base into the LBASE/UBASE register pair.
+ * NOTE(review): unlike the TAIL/HEAD macros below, bank/ring/value are
+ * not parenthesized here or in the CONFIG macro - confirm callers never
+ * pass compound expressions, or parenthesize for safety.
+ */
+#define WRITE_CSR_RING_BASE_GEN_LCE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2), \
+		l_base); \
+	ADF_CSR_WR(csr_base_addr, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2), \
+		u_base); \
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
new file mode 100644
index 0000000000..3c7232de12
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen_lce.h"
+
+/* VF ring CSRs sit at the same offsets as the PF view (no extra offset). */
+#define ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF 0x0
+
+/* VF variants of the GEN LCE ring CSR writers: same register layout,
+ * addressed at ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF from the mapped BAR.
+ */
+#define WRITE_CSR_RING_BASE_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2), \
+		l_base); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2), \
+		u_base); \
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN_LCEVF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 70f0effa62..215b291b74 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -410,4 +410,18 @@ struct icp_qat_fw_la_cipher_20_req_params {
uint8_t spc_auth_res_sz;
};
+/* Cipher request parameters for GEN LCE (v3.0) firmware LA requests. */
+struct icp_qat_fw_la_cipher_30_req_params {
+	/* Single-pass AAD size in bytes */
+	uint32_t spc_aad_sz;
+	/* NOTE(review): the GEN LCE PMD writes the key length into this
+	 * field - confirm the field semantics against the FW interface spec.
+	 */
+	uint8_t cipher_length;
+	uint8_t reserved[2];
+	/* Single-pass auth result (digest) size in bytes */
+	uint8_t spc_auth_res_sz;
+	/* IV: either embedded directly or referenced by physical pointer */
+	union {
+		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint64_t cipher_IV_ptr;
+			uint64_t resrvd1;
+		} s;
+
+	} u;
+};
#endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index 9411a79301..642e009f28 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -21,6 +21,7 @@ enum qat_device_gen {
QAT_GEN2,
QAT_GEN3,
QAT_GEN4,
+ QAT_GEN_LCE,
QAT_N_GENS
};
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
new file mode 100644
index 0000000000..cdd852600d
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+/* GEN LCE symmetric capabilities: AES-256-GCM only (12B IV, 16B digest). */
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/*
+ * Append one flat buffer (iova 'addr', 'len' bytes) to a QAT SGL.
+ * Returns 0 on success, -EINVAL when the SGL already holds
+ * QAT_SYM_SGL_MAX_NUMBER entries.
+ */
+static int
+qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding %d entry failed, no empty SGL buffer", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+			nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+/*
+ * Append exactly data_len bytes of an mbuf chain to a QAT SGL, skipping
+ * the first 'offset' bytes of the chain. The final entry is trimmed so the
+ * SGL covers exactly data_len bytes. Returns 0 on success, -EINVAL if the
+ * chain is too short or the SGL runs out of entries.
+ */
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+		void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx;
+	start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		/* Offset applies only to the first contributing segment. */
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
+			/* Trim the last entry so exactly data_len is covered. */
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+					QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+					nr, list->buffers[nr].len,
+					list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+/*
+ * Build a GEN LCE AEAD (AES-256-GCM) request descriptor for one crypto op.
+ * Only in-place, SGL-addressed operations with a 12B IV are supported.
+ * Returns 0 on success, -EINVAL for unsupported session or op parameters.
+ */
+static int
+qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+			ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+			ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+				"GEN_LCE PMD only supports AES-256 AEAD mode",
+				ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	/* Start from the session's pre-built request template. */
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B IV can be directly embedded in the descriptor.
+	 * GCM supports only 12B IV for GEN LCE.
+	 */
+	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.",
+				iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+			rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset),
+			iv_len);
+
+	/* Always SGL */
+	/* NOTE(review): '== 1' only holds if the SGL flag occupies bit 0 of
+	 * comn_req_flags; '!= 0' (or comparing against the flag value) would
+	 * be more robust - confirm.
+	 */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags &
+			ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	/* NOTE(review): 'cipher_length' is loaded with the key length here;
+	 * confirm the field semantics against the GEN LCE FW interface.
+	 */
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* Knowing digest is contiguous to cipher-text helps optimizing SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len)
+			== digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC-SGL: 3 entries:
+	 * a) AAD
+	 * b) cipher
+	 * c) digest (only for decrypt and buffer is_NOT_adjacent)
+	 *
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr,
+				aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Cipher-text and digest are contiguous: cover both with
+		 * a single mbuf walk.
+		 */
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+				&cookie->qat_sgl_src,
+				cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+				&cookie->qat_sgl_src,
+				cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
+					digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST-SGL: 2 entries:
+	 * a) cipher
+	 * b) digest (only for encrypt and buffer is_NOT_adjacent)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+				&cookie->qat_sgl_dst,
+				cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,
+				&cookie->qat_sgl_dst,
+				cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
+					digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	/* NOTE(review): fixed 16B constant here vs ctx->digest_length used
+	 * everywhere else in this function; consider digest_len for
+	 * consistency.
+	 */
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+			rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+			digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+/*
+ * Select the per-process request builder for a symmetric session.
+ * Only AES-256 GCM AEAD sessions receive a GEN LCE builder.
+ * NOTE(review): returns 0 even when no builder is assigned - confirm the
+ * common layer rejects such sessions, or return an error here.
+ */
+static int
+qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* build request for aead */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		build_request = qat_sym_build_op_aead_gen_lce;
+		ctx->build_request[proc_type] = build_request;
+	}
+	return 0;
+}
+
+
+/*
+ * Build (or look up) the capability-array memzone for this device and
+ * point the device private data at it. Returns 0 on success, -1 when the
+ * memzone cannot be reserved.
+ */
+static int
+qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
+		const char *capa_memz_name,
+		const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
+	uint32_t i;
+
+	/* Reuse an existing memzone if one was already created. */
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	struct rte_cryptodev_capabilities *addr =
+			(struct rte_cryptodev_capabilities *)
+			internals->capa_mz->addr;
+	const struct rte_cryptodev_capabilities *capabilities =
+			qat_sym_crypto_caps_gen_lce;
+	const uint32_t capa_num =
+			size / sizeof(struct rte_cryptodev_capabilities);
+	uint32_t curr_capa = 0;
+
+	/* Copy the static capability table into the shared memzone. */
+	for (i = 0; i < capa_num; i++) {
+		memcpy(addr + curr_capa, capabilities + i,
+				sizeof(struct rte_cryptodev_capabilities));
+		curr_capa++;
+	}
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+/* Register GEN_LCE symmetric-crypto ops; cryptodev ops are reused from GEN1. */
+RTE_INIT(qat_sym_crypto_gen_lce_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities =
+			qat_sym_crypto_cap_get_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session =
+			qat_sym_crypto_set_session_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags =
+			qat_sym_crypto_feature_flags_get_gen1;
+}
+
+/* Asymmetric crypto is not supported on GEN_LCE: all ops left NULL. */
+RTE_INIT(qat_asym_crypto_gen_lce_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f976009bf2..f2f197d050 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@
/* Maximum data length for single pass GMAC: 2^14-1 */
#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
+/* Digest length for the GCM algorithm is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for the GCM algorithm on GEN LCE is 12 bytes */
+#define GCM_IV_LENGTH_GEN_LCE 12
+
struct qat_sym_session;
struct qat_sym_sgl {
--
2.25.1
next prev parent reply other threads:[~2024-02-26 13:03 UTC|newest]
Thread overview: 47+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-12-20 13:26 [PATCH 1/4] common/qat: add files specific to GEN5 Nishikant Nayak
2023-12-20 13:26 ` [PATCH 2/4] common/qat: update common driver to support GEN5 Nishikant Nayak
2023-12-20 13:26 ` [PATCH 3/4] crypto/qat: update headers for GEN5 support Nishikant Nayak
2023-12-20 13:26 ` [PATCH 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-23 15:17 ` [PATCH 1/4] common/qat: add files specific to GEN5 Power, Ciara
2024-02-26 13:03 ` [PATCH v2 0/4] add QAT GEN LCE device Nishikant Nayak
2024-02-26 13:03 ` Nishikant Nayak [this message]
2024-02-26 13:03 ` [PATCH v2 2/4] common/qat: update common driver to support GEN LCE Nishikant Nayak
2024-02-26 13:03 ` [PATCH v2 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-26 13:03 ` [PATCH v2 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-27 9:35 ` [PATCH v3 0/4] add new QAT gen3 and gen5 Nishikant Nayak
2024-02-27 9:35 ` [PATCH v3 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
2024-02-27 9:35 ` [PATCH v3 2/4] common/qat: update common driver to support " Nishikant Nayak
2024-02-27 9:35 ` [PATCH v3 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-27 9:35 ` [PATCH v3 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-27 9:40 ` [PATCH v4 0/4] add QAT GEN LCE device Nishikant Nayak
2024-02-27 9:40 ` [PATCH v4 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
2024-02-27 9:40 ` [PATCH v4 2/4] common/qat: update common driver to support " Nishikant Nayak
2024-02-27 9:40 ` [PATCH v4 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-27 9:40 ` [PATCH v4 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-27 9:54 ` [PATCH v4 0/4] add QAT GEN LCE device Power, Ciara
2024-02-29 9:47 ` Kusztal, ArkadiuszX
2024-02-27 11:33 ` [PATCH v5 " Nishikant Nayak
2024-02-27 11:33 ` [PATCH v5 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
2024-02-27 11:33 ` [PATCH v5 2/4] common/qat: update common driver to support " Nishikant Nayak
2024-02-27 11:33 ` [PATCH v5 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-27 11:33 ` [PATCH v5 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-28 14:00 ` [PATCH v6 0/4] add QAT GEN LCE device Nishikant Nayak
2024-02-28 14:00 ` [PATCH v6 1/4] common/qat: add files specific to GEN LCE Nishikant Nayak
2024-02-29 16:09 ` [EXT] " Akhil Goyal
2024-02-29 16:14 ` Akhil Goyal
2024-02-29 16:30 ` Power, Ciara
2024-02-28 14:00 ` [PATCH v6 2/4] common/qat: update common driver to support " Nishikant Nayak
2024-02-28 14:00 ` [PATCH v6 3/4] crypto/qat: update headers for GEN LCE support Nishikant Nayak
2024-02-29 16:04 ` [EXT] " Akhil Goyal
2024-02-28 14:00 ` [PATCH v6 4/4] test/cryptodev: add tests for GCM with AAD Nishikant Nayak
2024-02-29 15:52 ` [EXT] " Akhil Goyal
2024-02-29 16:32 ` Power, Ciara
2024-02-29 18:43 ` [PATCH v7 0/3] add QAT GEN LCE device Ciara Power
2024-02-29 18:43 ` [PATCH v7 1/3] common/qat: add support for " Ciara Power
2024-02-29 18:43 ` [PATCH v7 2/3] crypto/qat: update headers for GEN LCE support Ciara Power
2024-02-29 18:43 ` [PATCH v7 3/3] test/cryptodev: add tests for GCM with 64 byte AAD Ciara Power
2024-02-29 19:45 ` [PATCH v8 0/3] add QAT GEN LCE device Ciara Power
2024-02-29 19:45 ` [PATCH v8 1/3] common/qat: add support for " Ciara Power
2024-02-29 19:45 ` [PATCH v8 2/3] crypto/qat: update headers for GEN LCE support Ciara Power
2024-02-29 19:45 ` [PATCH v8 3/3] test/cryptodev: add tests for GCM with 64 byte AAD Ciara Power
2024-03-01 6:12 ` [EXTERNAL] [PATCH v8 0/3] add QAT GEN LCE device Akhil Goyal
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240226130342.4115292-2-nishikanta.nayak@intel.com \
--to=nishikanta.nayak@intel.com \
--cc=anatoly.burakov@intel.com \
--cc=arkadiuszx.kusztal@intel.com \
--cc=ciara.power@intel.com \
--cc=dev@dpdk.org \
--cc=kai.ji@intel.com \
--cc=rakesh.s.joshi@intel.com \
--cc=thomas@monjalon.net \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).